Nov 25 08:48:58 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 25 08:48:58 crc restorecon[4770]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
system_u:object_r:container_file_t:s0:c0,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 25 08:48:58 crc restorecon[4770]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:58 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 08:48:59 crc 
restorecon[4770]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 25 08:48:59 crc 
restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc 
restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc 
restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 08:48:59 
crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 
08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 25 08:48:59 crc restorecon[4770]: 
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc 
restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 08:48:59 crc restorecon[4770]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 25 08:48:59 crc restorecon[4770]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 25 08:49:00 crc kubenswrapper[4932]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 08:49:00 crc kubenswrapper[4932]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 25 08:49:00 crc kubenswrapper[4932]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 08:49:00 crc kubenswrapper[4932]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
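[editor's note] The restorecon[4770] entries that dominate this log up to the 08:48:59 relabel of /var/usrlocal/bin/kubenswrapper are a file-context replay over /var/lib/kubelet before the kubelet starts: paths carrying container_file_t are reported "not reset as customized by admin", most likely because container_file_t is a customizable type that restorecon leaves alone unless forced, while genuinely mislabeled files (bin_t on the kubelet wrapper binary) are relabeled. A minimal sketch of inspecting and replaying such a pass by hand; the paths come from the log, and the customizable-types behavior of -F is an assumption about the local policy, not something this log states:

# show the label a file currently carries vs. what the loaded policy expects
ls -Z /var/lib/kubelet/config.json
matchpathcon /var/lib/kubelet/config.json

# replay the relabel; -R recurses, -v prints each change, customizable types are skipped
restorecon -Rv /var/lib/kubelet
# force-reset even customizable types such as container_file_t (disruptive on a live node)
restorecon -RFv /var/lib/kubelet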
Nov 25 08:49:00 crc kubenswrapper[4932]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 25 08:49:00 crc kubenswrapper[4932]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.338172 4932 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346394 4932 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346427 4932 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346443 4932 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346452 4932 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346460 4932 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346469 4932 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346477 4932 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346486 4932 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346495 4932 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346503 4932 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346511 4932 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346519 4932 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346527 4932 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346535 4932 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346542 4932 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346550 4932 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346558 4932 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346569 4932 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
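[editor's note] The six "Flag ... has been deprecated" warnings above all point at the same migration: each flag has a counterpart field in the KubeletConfiguration file named by --config. A minimal sketch of that config-file form, with illustrative values only; on this cluster the file is managed for the node, and the CRI-O socket path, taint, and reservation sizes below are assumptions, not values read from this log:

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
containerRuntimeEndpoint: unix:///var/run/crio/crio.sock      # replaces --container-runtime-endpoint
volumePluginDir: /etc/kubernetes/kubelet-plugins/volume/exec  # replaces --volume-plugin-dir
registerWithTaints:                                           # replaces --register-with-taints
- key: node-role.kubernetes.io/master
  effect: NoSchedule
systemReserved:                                               # replaces --system-reserved
  cpu: 500m
  memory: 1Gi
evictionHard:                                                 # direction the log suggests for --minimum-container-ttl-duration
  memory.available: 100Mi

--pod-infra-container-image has no config-file field; per the message above, the sandbox (pause) image is now taken from the CRI runtime's own configuration.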
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346579 4932 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346588 4932 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346596 4932 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346604 4932 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346614 4932 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346622 4932 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346639 4932 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346648 4932 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346656 4932 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346678 4932 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346701 4932 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346709 4932 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346718 4932 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346729 4932 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346739 4932 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346747 4932 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346754 4932 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346762 4932 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346769 4932 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346777 4932 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346785 4932 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346794 4932 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346802 4932 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346809 4932 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346817 4932 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346826 4932 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346833 4932 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346841 4932 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346848 4932 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346856 4932 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346864 4932 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346871 4932 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346879 4932 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346887 4932 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346895 4932 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346903 4932 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346913 4932 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346921 4932 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346929 4932 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346937 4932 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346945 4932 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346956 4932 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346964 4932 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346972 4932 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346979 4932 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346988 4932 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.346995 4932 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.347007 4932 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.347017 4932 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.347026 4932 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.347036 4932 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.347046 4932 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.347054 4932 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348787 4932 flags.go:64] FLAG: --address="0.0.0.0"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348811 4932 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348835 4932 flags.go:64] FLAG: --anonymous-auth="true"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348847 4932 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348858 4932 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348867 4932 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348878 4932 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348890 4932 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348900 4932 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348911 4932 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348922 4932 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348933 4932 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348944 4932 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348953 4932 flags.go:64] FLAG: --cgroup-root=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348962 4932 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348971 4932 flags.go:64] FLAG: --client-ca-file=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348980 4932 flags.go:64] FLAG: --cloud-config=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348989 4932 flags.go:64] FLAG: --cloud-provider=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.348998 4932 flags.go:64] FLAG: --cluster-dns="[]"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349009 4932 flags.go:64] FLAG: --cluster-domain=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349019 4932 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349028 4932 flags.go:64] FLAG: --config-dir=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349037 4932 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349047 4932 flags.go:64] FLAG: --container-log-max-files="5"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349059 4932 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349068 4932 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349077 4932 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349086 4932 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349095 4932 flags.go:64] FLAG: --contention-profiling="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349104 4932 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349113 4932 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349124 4932 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349133 4932 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349144 4932 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349153 4932 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349162 4932 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349171 4932 flags.go:64] FLAG: --enable-load-reader="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349180 4932 flags.go:64] FLAG: --enable-server="true"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349217 4932 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349231 4932 flags.go:64] FLAG: --event-burst="100"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349240 4932 flags.go:64] FLAG: --event-qps="50"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349249 4932 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349259 4932 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349268 4932 flags.go:64] FLAG: --eviction-hard=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349280 4932 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349289 4932 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349298 4932 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349307 4932 flags.go:64] FLAG: --eviction-soft=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349316 4932 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349325 4932 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349334 4932 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349342 4932 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349351 4932 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349360 4932 flags.go:64] FLAG: --fail-swap-on="true"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349369 4932 flags.go:64] FLAG: --feature-gates=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349380 4932 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349390 4932 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349399 4932 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349408 4932 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349417 4932 flags.go:64] FLAG: --healthz-port="10248"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349426 4932 flags.go:64] FLAG: --help="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349435 4932 flags.go:64] FLAG: --hostname-override=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349444 4932 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349453 4932 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349462 4932 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349470 4932 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349479 4932 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349489 4932 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349498 4932 flags.go:64] FLAG: --image-service-endpoint=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349507 4932 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349517 4932 flags.go:64] FLAG: --kube-api-burst="100"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349527 4932 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349537 4932 flags.go:64] FLAG: --kube-api-qps="50"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349547 4932 flags.go:64] FLAG: --kube-reserved=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349556 4932 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349565 4932 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349576 4932 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349585 4932 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349595 4932 flags.go:64] FLAG: --lock-file=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349603 4932 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349612 4932 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349622 4932 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349634 4932 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349643 4932 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349652 4932 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349661 4932 flags.go:64] FLAG: --logging-format="text"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349701 4932 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349713 4932 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349722 4932 flags.go:64] FLAG: --manifest-url=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349732 4932 flags.go:64] FLAG: --manifest-url-header=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349744 4932 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349754 4932 flags.go:64] FLAG: --max-open-files="1000000"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349765 4932 flags.go:64] FLAG: --max-pods="110"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349775 4932 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349784 4932 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349793 4932 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349802 4932 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349812 4932 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349821 4932 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349830 4932 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349850 4932 flags.go:64] FLAG: --node-status-max-images="50"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349859 4932 flags.go:64] FLAG: --node-status-update-frequency="10s"
--oom-score-adj="-999" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349877 4932 flags.go:64] FLAG: --pod-cidr="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349886 4932 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349901 4932 flags.go:64] FLAG: --pod-manifest-path="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349910 4932 flags.go:64] FLAG: --pod-max-pids="-1" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349920 4932 flags.go:64] FLAG: --pods-per-core="0" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349929 4932 flags.go:64] FLAG: --port="10250" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349942 4932 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349951 4932 flags.go:64] FLAG: --provider-id="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349960 4932 flags.go:64] FLAG: --qos-reserved="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349970 4932 flags.go:64] FLAG: --read-only-port="10255" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349980 4932 flags.go:64] FLAG: --register-node="true" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349989 4932 flags.go:64] FLAG: --register-schedulable="true" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.349998 4932 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350013 4932 flags.go:64] FLAG: --registry-burst="10" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350022 4932 flags.go:64] FLAG: --registry-qps="5" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350031 4932 flags.go:64] FLAG: --reserved-cpus="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350040 4932 flags.go:64] FLAG: --reserved-memory="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350051 4932 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350060 4932 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350069 4932 flags.go:64] FLAG: --rotate-certificates="false" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350078 4932 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350087 4932 flags.go:64] FLAG: --runonce="false" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350096 4932 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350106 4932 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350115 4932 flags.go:64] FLAG: --seccomp-default="false" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350124 4932 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350133 4932 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350142 4932 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350153 4932 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350162 4932 
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350162 4932 flags.go:64] FLAG: --storage-driver-password="root"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350173 4932 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350182 4932 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350218 4932 flags.go:64] FLAG: --storage-driver-user="root"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350227 4932 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350237 4932 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350246 4932 flags.go:64] FLAG: --system-cgroups=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350256 4932 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350270 4932 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350284 4932 flags.go:64] FLAG: --tls-cert-file=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350293 4932 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350305 4932 flags.go:64] FLAG: --tls-min-version=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350314 4932 flags.go:64] FLAG: --tls-private-key-file=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350324 4932 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350333 4932 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350342 4932 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350352 4932 flags.go:64] FLAG: --v="2"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350365 4932 flags.go:64] FLAG: --version="false"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350377 4932 flags.go:64] FLAG: --vmodule=""
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350391 4932 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.350402 4932 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350601 4932 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350612 4932 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350620 4932 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350628 4932 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350636 4932 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350644 4932 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350652 4932 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350660 4932 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350668 4932 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350676 4932 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350686 4932 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350695 4932 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350704 4932 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350711 4932 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350722 4932 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350732 4932 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350740 4932 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350751 4932 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350759 4932 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350767 4932 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350780 4932 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350790 4932 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350798 4932 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350806 4932 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350814 4932 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350821 4932 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350829 4932 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350836 4932 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350846 4932 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350854 4932 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350862 4932 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350869 4932 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350877 4932 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350885 4932 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350893 4932 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350900 4932 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350908 4932 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350915 4932 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350923 4932 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350931 4932 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350938 4932 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350946 4932 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350953 4932 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350964 4932 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350972 4932 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350980 4932 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350990 4932 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.350998 4932 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351007 4932 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351016 4932 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351025 4932 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351033 4932 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351043 4932 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351051 4932 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351060 4932 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351067 4932 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351075 4932 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351083 4932 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351091 4932 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351099 4932 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351107 4932 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351114 4932 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351122 4932 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351129 4932 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351139 4932 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351146 4932 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351154 4932 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351162 4932 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351169 4932 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351177 4932 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.351185 4932 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.351234 4932 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.361943 4932 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.361995 4932 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362071 4932 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362081 4932 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362087 4932 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362092 4932 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362096 4932 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362100 4932 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362105 4932 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362111 4932 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362118 4932 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362123 4932 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362128 4932 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362132 4932 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362137 4932 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362141 4932 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362146 4932 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362150 4932 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362154 4932 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362158 4932 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362164 4932 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362171 4932 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362176 4932 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362180 4932 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362184 4932 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362205 4932 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362210 4932 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362214 4932 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362219 4932 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362223 4932 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362229 4932 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362233 4932 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362237 4932 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362242 4932 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362247 4932 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362252 4932 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362261 4932 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362265 4932 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362269 4932 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362274 4932 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362278 4932 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362284 4932 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362288 4932 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362294 4932 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362299 4932 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362303 4932 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362308 4932 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362314 4932 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362318 4932 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362322 4932 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362326 4932 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362330 4932 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362334 4932 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362338 4932 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362342 4932 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362347 4932 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362351 4932 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362355 4932 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362359 4932 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362364 4932 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362369 4932 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362375 4932 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362380 4932 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362385 4932 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362390 4932 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362394 4932 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362399 4932 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362404 4932 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362408 4932 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362413 4932 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362417 4932 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362421 4932 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362427 4932 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.362437 4932 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362569 4932 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362579 4932 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362584 4932 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362590 4932 feature_gate.go:330] unrecognized feature gate: Example
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362595 4932 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362599 4932 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362604 4932 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362609 4932 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362614 4932 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362620 4932 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362625 4932 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362629 4932 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362635 4932 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362641 4932 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362646 4932 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362651 4932 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362655 4932 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362660 4932 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362666 4932 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362672 4932 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362677 4932 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362682 4932 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362688 4932 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362693 4932 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362698 4932 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362704 4932 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362708 4932 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362712 4932 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362716 4932 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362719 4932 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362723 4932 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362727 4932 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362731 4932 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362734 4932 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362739 4932 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362743 4932 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362746 4932 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362750 4932 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362753 4932 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362757 4932 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362760 4932 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362764 4932 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362767 4932 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362772 4932 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362776 4932 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362780 4932 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362784 4932 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362788 4932 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362792 4932 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362796 4932 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362800 4932 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362803 4932 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362808 4932 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362812 4932 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362816 4932 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362821 4932 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362825 4932 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362828 4932 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362832 4932 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362835 4932 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362839 4932 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362842 4932 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362846 4932 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362850 4932 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362854 4932 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362857 4932 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362861 4932 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362865 4932 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362869 4932 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362873 4932 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.362878 4932 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.362885 4932 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.363780 4932 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.370349 4932 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.370449 4932 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
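[editor's note] The "unrecognized feature gate" warnings above repeat each time the kubelet re-parses its feature-gate configuration (visible as the .346xxx, .350xxx, and .362xxx timestamp batches), while the effective map logged by the feature_gate.go:386 lines is identical every time. A minimal Python sketch of how one might summarize these runs when triaging such a log; the input path "kubelet.log" is hypothetical, and the regexes simply match the line formats shown above:

import re
from collections import Counter

unrecognized = Counter()   # gate name -> number of times it was warned about
effective = {}             # last effective feature-gate map seen

with open("kubelet.log", encoding="utf-8") as f:
    for line in f:
        m = re.search(r"unrecognized feature gate: (\w+)", line)
        if m:
            unrecognized[m.group(1)] += 1
        m = re.search(r"feature gates: \{map\[([^\]]*)\]\}", line)
        if m:
            # entries look like "CloudDualStackNodeIPs:true KMSv1:true ..."
            effective = {k: v == "true"
                         for k, v in (kv.split(":") for kv in m.group(1).split())}

print(f"{len(unrecognized)} distinct unrecognized gates")
for name, count in unrecognized.most_common(5):
    print(f"  {name}: warned {count} times")
print("effective gates:", effective)

Such a summary makes it easy to confirm that the warnings are all OpenShift-specific gate names the upstream kubelet registry does not know, not a configuration drift between parse runs.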
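[editor's note] In the certificate-rotation entries just below, the "Waiting 55h25m0.606884241s for next certificate rotation" figure is simply the logged rotation deadline minus the moment the message was emitted; both timestamps appear verbatim in the log. A quick Python check of that arithmetic (timestamps copied from the entries below, truncated to microsecond precision, so the result differs from the log in the last few digits):

from datetime import datetime, timezone

# rotation deadline: 2025-11-27 16:14:00.979710507 +0000 UTC
deadline = datetime(2025, 11, 27, 16, 14, 0, 979710, tzinfo=timezone.utc)
# timestamp of the "Waiting ..." message: 08:49:00.372829 on Nov 25
logged_at = datetime(2025, 11, 25, 8, 49, 0, 372829, tzinfo=timezone.utc)

wait = deadline - logged_at
hours, rem = divmod(wait.total_seconds(), 3600)
minutes, seconds = divmod(rem, 60)
print(f"{int(hours)}h{int(minutes)}m{seconds:.6f}s")  # 55h25m0.606881s, matching the log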
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.372538    4932 server.go:997] "Starting client certificate rotation"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.372588    4932 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.372757    4932 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-27 16:14:00.979710507 +0000 UTC
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.372829    4932 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 55h25m0.606884241s for next certificate rotation
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.410592    4932 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.412231    4932 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.428656    4932 log.go:25] "Validated CRI v1 runtime API"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.471077    4932 log.go:25] "Validated CRI v1 image API"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.472860    4932 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.478579    4932 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-25-08-43-57-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.478602    4932 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.495551    4932 manager.go:217] Machine: {Timestamp:2025-11-25 08:49:00.49117025 +0000 UTC m=+0.617199883 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:fbb2a061-2abc-4717-831d-47e83fc0993f BootID:62bd1c5f-ae99-478e-b19e-e49920d66581 Filesystems:[{Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:46:f8:fc Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:46:f8:fc Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:50:31:5f Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:5a:4b:f3 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:b8:5c:cc Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:47:2a:1f Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:93:a4:a2 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:16:35:57:aa:cc:23 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:0e:27:8d:09:f8:15 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.495934    4932 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.496103    4932 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.498260    4932 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.498567    4932 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.498627    4932 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.498945    4932 topology_manager.go:138] "Creating topology manager with none policy"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.498967    4932 container_manager_linux.go:303] "Creating device plugin manager"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.499603    4932 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.499643    4932 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.507276    4932 state_mem.go:36] "Initialized new in-memory state store"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.507365    4932 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.523916    4932 kubelet.go:418] "Attempting to sync node with API server"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.523934    4932 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.523957    4932 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.523969    4932 kubelet.go:324] "Adding apiserver pod source"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.523980    4932 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.528773    4932 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.529479    4932 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.531083    4932 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused
Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.531207    4932 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.77:6443: connect: connection refused" logger="UnhandledError"
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.531162    4932 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused
Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.531290    4932 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.77:6443: connect: connection refused" logger="UnhandledError"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.532353    4932 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.535482    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.536033    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.536047    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.536057    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.536074    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.536086    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.536096    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.536112    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.536123    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.536134    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.536148    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.536158    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.537327    4932 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.538099    4932 server.go:1280] "Started kubelet"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.539580    4932 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused
Nov 25 08:49:00 crc systemd[1]: Started Kubernetes Kubelet.
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.540155    4932 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.540165    4932 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.540774    4932 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.541735    4932 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.541778    4932 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.542652    4932 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 04:47:40.478095848 +0000 UTC
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.542752    4932 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1051h58m39.935349106s for next certificate rotation
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.542984    4932 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.543006    4932 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.543093    4932 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.543140    4932 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.543681    4932 server.go:460] "Adding debug handlers to kubelet server"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.544325    4932 factory.go:55] Registering systemd factory
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.544364    4932 factory.go:221] Registration of the systemd container factory successfully
Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.544736    4932 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused
Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.544790    4932 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.77:6443: connect: connection refused" logger="UnhandledError"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.545423    4932 factory.go:153] Registering CRI-O factory
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.545436    4932 factory.go:221] Registration of the crio container factory successfully
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.545493    4932 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.545515    4932 factory.go:103] Registering Raw factory
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.545531    4932 manager.go:1196] Started watching for new ooms in manager
Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.545692    4932 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" interval="200ms"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.546140    4932 manager.go:319] Starting recovery of all containers
Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.550055    4932 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.77:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b33b7190deb60 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 08:49:00.53805552 +0000 UTC m=+0.664085103,LastTimestamp:2025-11-25 08:49:00.53805552 +0000 UTC m=+0.664085103,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.562630    4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 25 08:49:00 crc
kubenswrapper[4932]: I1125 08:49:00.563967 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.563989 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564003 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564021 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564032 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564062 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564089 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564111 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564135 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564152 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564180 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564226 4932 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564246 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564260 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564276 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564294 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564321 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564344 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564360 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564375 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564387 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564398 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564412 4932 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564427 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564444 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564458 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564476 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564491 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564507 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564517 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564532 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564545 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564558 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564574 4932 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564587 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564601 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564614 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564624 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564640 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564652 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564664 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564679 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564691 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564708 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564718 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564729 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564743 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564756 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564769 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564779 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564790 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564807 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564825 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564836 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564850 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564864 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564889 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564903 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564915 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564929 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564944 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564954 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564965 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564975 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.564986 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565000 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565053 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565066 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565080 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565091 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565103 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565112 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565122 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565135 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565146 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565159 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565170 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565179 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565207 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565218 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565232 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565243 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565254 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565266 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565275 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565291 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565303 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565319 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565334 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" 
volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565346 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565458 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565554 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565664 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565715 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565748 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565790 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565824 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.565899 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566013 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566053 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566093 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566134 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566246 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566306 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566361 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566416 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566462 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566523 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566568 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566624 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566669 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566717 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566750 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566792 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566822 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566854 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566895 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566928 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.566968 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567044 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567077 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567183 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567272 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567306 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567346 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567380 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567410 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567447 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567487 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567519 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567580 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567611 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567649 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" 
volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567680 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567724 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567784 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567815 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.567853 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574219 4932 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574310 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574352 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574379 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574415 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574435 4932 reconstruct.go:130] "Volume is marked as uncertain 
and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574501 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574520 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574539 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574574 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574892 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574922 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574944 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574964 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.574987 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575008 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575030 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575052 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575072 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575092 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575114 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575132 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575152 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575170 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575216 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575236 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575254 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575279 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575298 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575318 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575340 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575359 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575378 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575398 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575423 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575448 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575474 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575541 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575594 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575624 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575646 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575666 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575687 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575707 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575725 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575745 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575763 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575782 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575804 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575823 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575842 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575881 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575900 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575920 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575941 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575962 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.575983 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576001 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576019 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576041 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576061 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" 
volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576079 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576101 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576120 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576143 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576176 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576249 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576280 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576300 4932 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576319 4932 reconstruct.go:97] "Volume reconstruction finished" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.576335 4932 reconciler.go:26] "Reconciler: start to sync state" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.582519 4932 manager.go:324] Recovery completed Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.595339 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.597466 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.597525 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:00 crc kubenswrapper[4932]: 
I1125 08:49:00.597540 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.598571 4932 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.598587 4932 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.598605 4932 state_mem.go:36] "Initialized new in-memory state store" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.602581 4932 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.604591 4932 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.604638 4932 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.604677 4932 kubelet.go:2335] "Starting kubelet main sync loop" Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.604760 4932 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 25 08:49:00 crc kubenswrapper[4932]: W1125 08:49:00.605418 4932 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.605474 4932 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.77:6443: connect: connection refused" logger="UnhandledError" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.612917 4932 policy_none.go:49] "None policy: Start" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.613881 4932 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.613923 4932 state_mem.go:35] "Initializing new in-memory state store" Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.643319 4932 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.705675 4932 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.729647 4932 manager.go:334] "Starting Device Plugin manager" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.730034 4932 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.730058 4932 server.go:79] "Starting device plugin registration server" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.730587 4932 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.730611 4932 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.730780 4932 plugin_watcher.go:51] 
"Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.730888 4932 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.730902 4932 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.739141 4932 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.747340 4932 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" interval="400ms" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.831291 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.832713 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.832754 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.832767 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.832795 4932 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 08:49:00 crc kubenswrapper[4932]: E1125 08:49:00.833540 4932 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.77:6443: connect: connection refused" node="crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.906800 4932 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.906918 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.908604 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.908646 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.908659 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.908775 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.909148 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.909219 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.909646 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.909662 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.909670 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.909808 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.909873 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.909912 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.910443 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.910467 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.910475 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.910488 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.910511 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.910520 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.910552 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.910773 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.910870 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.910933 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.910965 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.910977 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.911224 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.911248 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.911262 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.911376 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.911484 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.911509 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.912142 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.912164 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.912173 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.912150 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.912232 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.912242 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.912393 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.912444 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.913234 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.913297 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.913317 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.918336 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.918361 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.918369 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981539 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981587 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981608 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981624 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981643 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981657 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981672 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981688 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981703 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981771 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981857 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981879 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981900 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981944 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:00 crc kubenswrapper[4932]: I1125 08:49:00.981995 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 
08:49:01.034088 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.035665 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.035720 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.035741 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.035776 4932 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 08:49:01 crc kubenswrapper[4932]: E1125 08:49:01.036404 4932 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.77:6443: connect: connection refused" node="crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083233 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083333 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083397 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083439 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083484 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083512 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083572 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 
08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083615 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083523 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083633 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083685 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083681 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083733 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083569 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083784 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083772 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083825 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: 
\"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083864 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083899 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083901 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083583 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083905 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083968 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.083978 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.084013 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.084045 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.084073 4932 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.084129 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.084157 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.084255 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: E1125 08:49:01.148910 4932 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" interval="800ms" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.233039 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.238218 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.260706 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.273277 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.278602 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 08:49:01 crc kubenswrapper[4932]: W1125 08:49:01.280783 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-9fddce1e54bf3bad93b478f83fb6cd0bab31ac0490abd98f3fb216a6b172d9e7 WatchSource:0}: Error finding container 9fddce1e54bf3bad93b478f83fb6cd0bab31ac0490abd98f3fb216a6b172d9e7: Status 404 returned error can't find the container with id 9fddce1e54bf3bad93b478f83fb6cd0bab31ac0490abd98f3fb216a6b172d9e7 Nov 25 08:49:01 crc kubenswrapper[4932]: W1125 08:49:01.282339 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-e081c0a11a0530abcf1da123744dd2c36377bb3f2f670281f23003be63347db3 WatchSource:0}: Error finding container e081c0a11a0530abcf1da123744dd2c36377bb3f2f670281f23003be63347db3: Status 404 returned error can't find the container with id e081c0a11a0530abcf1da123744dd2c36377bb3f2f670281f23003be63347db3 Nov 25 08:49:01 crc kubenswrapper[4932]: W1125 08:49:01.294131 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-7f3f5dacf30aa57872efbd528e7f8419dc71fc3bb199c976a1a309065eeac7d0 WatchSource:0}: Error finding container 7f3f5dacf30aa57872efbd528e7f8419dc71fc3bb199c976a1a309065eeac7d0: Status 404 returned error can't find the container with id 7f3f5dacf30aa57872efbd528e7f8419dc71fc3bb199c976a1a309065eeac7d0 Nov 25 08:49:01 crc kubenswrapper[4932]: W1125 08:49:01.299399 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-fa2e9f37e579fca4c7af63d92b7431640667887bee77ef71cee4156c00e3dda3 WatchSource:0}: Error finding container fa2e9f37e579fca4c7af63d92b7431640667887bee77ef71cee4156c00e3dda3: Status 404 returned error can't find the container with id fa2e9f37e579fca4c7af63d92b7431640667887bee77ef71cee4156c00e3dda3 Nov 25 08:49:01 crc kubenswrapper[4932]: W1125 08:49:01.300984 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-f2e234ad4a7c2a6c2597f6fc86d7fe8e9edc5cddd7c2eec02e678c3290638188 WatchSource:0}: Error finding container f2e234ad4a7c2a6c2597f6fc86d7fe8e9edc5cddd7c2eec02e678c3290638188: Status 404 returned error can't find the container with id f2e234ad4a7c2a6c2597f6fc86d7fe8e9edc5cddd7c2eec02e678c3290638188 Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.437056 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.438341 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.438369 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.438380 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.438406 4932 kubelet_node_status.go:76] "Attempting to 
register node" node="crc" Nov 25 08:49:01 crc kubenswrapper[4932]: E1125 08:49:01.438898 4932 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.77:6443: connect: connection refused" node="crc" Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.540746 4932 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.610072 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"e081c0a11a0530abcf1da123744dd2c36377bb3f2f670281f23003be63347db3"} Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.611817 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"9fddce1e54bf3bad93b478f83fb6cd0bab31ac0490abd98f3fb216a6b172d9e7"} Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.613219 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f2e234ad4a7c2a6c2597f6fc86d7fe8e9edc5cddd7c2eec02e678c3290638188"} Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.614523 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"fa2e9f37e579fca4c7af63d92b7431640667887bee77ef71cee4156c00e3dda3"} Nov 25 08:49:01 crc kubenswrapper[4932]: I1125 08:49:01.615494 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7f3f5dacf30aa57872efbd528e7f8419dc71fc3bb199c976a1a309065eeac7d0"} Nov 25 08:49:01 crc kubenswrapper[4932]: W1125 08:49:01.654492 4932 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:01 crc kubenswrapper[4932]: E1125 08:49:01.654560 4932 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.77:6443: connect: connection refused" logger="UnhandledError" Nov 25 08:49:01 crc kubenswrapper[4932]: W1125 08:49:01.775671 4932 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:01 crc kubenswrapper[4932]: E1125 08:49:01.775769 4932 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get 
\"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.77:6443: connect: connection refused" logger="UnhandledError" Nov 25 08:49:01 crc kubenswrapper[4932]: W1125 08:49:01.832544 4932 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:01 crc kubenswrapper[4932]: E1125 08:49:01.832618 4932 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.77:6443: connect: connection refused" logger="UnhandledError" Nov 25 08:49:01 crc kubenswrapper[4932]: E1125 08:49:01.949977 4932 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" interval="1.6s" Nov 25 08:49:02 crc kubenswrapper[4932]: W1125 08:49:02.094822 4932 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:02 crc kubenswrapper[4932]: E1125 08:49:02.094906 4932 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.77:6443: connect: connection refused" logger="UnhandledError" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.239531 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.240653 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.240685 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.240694 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.240715 4932 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 08:49:02 crc kubenswrapper[4932]: E1125 08:49:02.241092 4932 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.77:6443: connect: connection refused" node="crc" Nov 25 08:49:02 crc kubenswrapper[4932]: E1125 08:49:02.368337 4932 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.77:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b33b7190deb60 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 08:49:00.53805552 +0000 UTC m=+0.664085103,LastTimestamp:2025-11-25 08:49:00.53805552 +0000 UTC m=+0.664085103,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.540772 4932 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.622460 4932 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85" exitCode=0 Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.622562 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85"} Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.622698 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.624692 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.624758 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.624782 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.624920 4932 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db" exitCode=0 Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.625019 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db"} Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.625166 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.627082 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.627142 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.627166 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.630389 4932 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="3d40aeccf50552a790adf9f073813f84671bf1ead9588a1caef7a9f39a6008eb" exitCode=0 Nov 25 08:49:02 crc 
kubenswrapper[4932]: I1125 08:49:02.630457 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"3d40aeccf50552a790adf9f073813f84671bf1ead9588a1caef7a9f39a6008eb"} Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.630580 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.632038 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.632060 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.632069 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.635083 4932 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81" exitCode=0 Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.635157 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81"} Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.635271 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.636416 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.636451 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.636464 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.637699 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.638531 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.638554 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.638566 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.640108 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1"} Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.640132 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0"} Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.640145 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4"} Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.640157 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79"} Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.641181 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.642488 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.642507 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:02 crc kubenswrapper[4932]: I1125 08:49:02.642515 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:03 crc kubenswrapper[4932]: W1125 08:49:03.537118 4932 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:03 crc kubenswrapper[4932]: E1125 08:49:03.537240 4932 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.77:6443: connect: connection refused" logger="UnhandledError" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.540374 4932 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:03 crc kubenswrapper[4932]: E1125 08:49:03.551477 4932 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" interval="3.2s" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.643863 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"ff2840144d631dc539ccb7c6c3a3d2a0f10890544339bed232ac76313641ffae"} Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.643955 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.645365 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:03 crc kubenswrapper[4932]: 
I1125 08:49:03.645391 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.645400 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.647043 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb"} Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.647070 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a"} Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.647097 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35"} Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.647110 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961"} Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.651646 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"6d3939adbaf71715989609ee9a73b99541968cd43b3bf1af3cfe6a8d2e1c9b4b"} Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.651687 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"bda5bbf2bfe9fb4f5ff03e336f536c431fe2c89ae303bdb6afcb0bbb04b87641"} Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.651703 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"0e8f66d3dfcfc67f90083e6611e327c729d82111f820986cad673a82792d5d80"} Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.651740 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.654812 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.654856 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.654867 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.657992 4932 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c" exitCode=0 Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.658126 4932 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.658146 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.658807 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c"} Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.659167 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.659215 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.659227 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.659275 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.659300 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.659313 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:03 crc kubenswrapper[4932]: W1125 08:49:03.774998 4932 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:03 crc kubenswrapper[4932]: E1125 08:49:03.775068 4932 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.77:6443: connect: connection refused" logger="UnhandledError" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.841373 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.842373 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.842403 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.842412 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:03 crc kubenswrapper[4932]: I1125 08:49:03.842432 4932 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 08:49:03 crc kubenswrapper[4932]: E1125 08:49:03.842813 4932 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.77:6443: connect: connection refused" node="crc" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.153214 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.612851 4932 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:04 crc kubenswrapper[4932]: W1125 08:49:04.617501 4932 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:04 crc kubenswrapper[4932]: E1125 08:49:04.617591 4932 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.77:6443: connect: connection refused" logger="UnhandledError" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.621941 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.666217 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c"} Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.666405 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.667355 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.667380 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.667392 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.669280 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.669690 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.670033 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f"} Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.670141 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.670517 4932 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.670547 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.671398 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.671423 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.671433 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.671868 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.671885 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.671893 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.672268 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.672287 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.672304 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.672775 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.672795 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:04 crc kubenswrapper[4932]: I1125 08:49:04.672804 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:04 crc kubenswrapper[4932]: W1125 08:49:04.770514 4932 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.77:6443: connect: connection refused Nov 25 08:49:04 crc kubenswrapper[4932]: E1125 08:49:04.770596 4932 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.77:6443: connect: connection refused" logger="UnhandledError" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.464011 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.586531 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.673684 4932 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f" exitCode=0 Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.673803 4932 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.673841 4932 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.673891 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.674331 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f"} Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.673841 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.674436 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.674479 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.674794 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.674829 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.674841 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.674899 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.674917 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.674928 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.675067 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.675091 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.675102 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.675447 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.675521 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.675536 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:05 crc kubenswrapper[4932]: I1125 08:49:05.696364 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.680803 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92"} Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.680879 4932 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.680900 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08"} Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.680923 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.680923 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382"} Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.681008 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca"} Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.680905 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.682017 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.682063 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.682081 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.682248 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.682283 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:06 crc kubenswrapper[4932]: I1125 08:49:06.682294 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.043384 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.045087 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.045133 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.045143 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.045176 4932 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.689565 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:07 crc 
kubenswrapper[4932]: I1125 08:49:07.690137 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.690445 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4"} Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.690853 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.690882 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.690890 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.691513 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.691539 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:07 crc kubenswrapper[4932]: I1125 08:49:07.691552 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:08 crc kubenswrapper[4932]: I1125 08:49:08.293139 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:08 crc kubenswrapper[4932]: I1125 08:49:08.692634 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:08 crc kubenswrapper[4932]: I1125 08:49:08.692690 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:08 crc kubenswrapper[4932]: I1125 08:49:08.694416 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:08 crc kubenswrapper[4932]: I1125 08:49:08.694444 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:08 crc kubenswrapper[4932]: I1125 08:49:08.694416 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:08 crc kubenswrapper[4932]: I1125 08:49:08.694481 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:08 crc kubenswrapper[4932]: I1125 08:49:08.694500 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:08 crc kubenswrapper[4932]: I1125 08:49:08.694452 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:09 crc kubenswrapper[4932]: I1125 08:49:09.180965 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:09 crc kubenswrapper[4932]: I1125 08:49:09.181153 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:09 crc kubenswrapper[4932]: I1125 08:49:09.182491 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 08:49:09 crc kubenswrapper[4932]: I1125 08:49:09.182528 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:09 crc kubenswrapper[4932]: I1125 08:49:09.182539 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:09 crc kubenswrapper[4932]: I1125 08:49:09.613728 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 25 08:49:09 crc kubenswrapper[4932]: I1125 08:49:09.696053 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:09 crc kubenswrapper[4932]: I1125 08:49:09.697353 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:09 crc kubenswrapper[4932]: I1125 08:49:09.697387 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:09 crc kubenswrapper[4932]: I1125 08:49:09.697398 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:10 crc kubenswrapper[4932]: E1125 08:49:10.739315 4932 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 08:49:12 crc kubenswrapper[4932]: I1125 08:49:12.271418 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:12 crc kubenswrapper[4932]: I1125 08:49:12.271573 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:12 crc kubenswrapper[4932]: I1125 08:49:12.272816 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:12 crc kubenswrapper[4932]: I1125 08:49:12.272848 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:12 crc kubenswrapper[4932]: I1125 08:49:12.272856 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:12 crc kubenswrapper[4932]: I1125 08:49:12.276087 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:12 crc kubenswrapper[4932]: I1125 08:49:12.704400 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:12 crc kubenswrapper[4932]: I1125 08:49:12.705558 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:12 crc kubenswrapper[4932]: I1125 08:49:12.705616 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:12 crc kubenswrapper[4932]: I1125 08:49:12.705627 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:13 crc kubenswrapper[4932]: I1125 08:49:13.984874 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 25 08:49:13 crc kubenswrapper[4932]: I1125 08:49:13.985702 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 08:49:13 crc 
kubenswrapper[4932]: I1125 08:49:13.987480 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:13 crc kubenswrapper[4932]: I1125 08:49:13.987513 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:13 crc kubenswrapper[4932]: I1125 08:49:13.987529 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.272421 4932 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.272521 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.324138 4932 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:60804->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.324250 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:60804->192.168.126.11:17697: read: connection reset by peer"
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.324162 4932 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:60816->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.324343 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:60816->192.168.126.11:17697: read: connection reset by peer"
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.541888 4932 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.703723 4932 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.703821 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/livez\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.713475 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.714961 4932 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c" exitCode=255
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.715004 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c"}
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.715199 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.715946 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.715971 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.715979 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.716420 4932 scope.go:117] "RemoveContainer" containerID="b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c"
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.816358 4932 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 08:49:15 crc kubenswrapper[4932]: I1125 08:49:15.816442 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 08:49:16 crc kubenswrapper[4932]: I1125 08:49:16.719369 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 08:49:16 crc kubenswrapper[4932]: I1125 08:49:16.720949 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d"}
Nov 25 08:49:16 crc kubenswrapper[4932]: I1125 08:49:16.721099 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 08:49:16 crc kubenswrapper[4932]: I1125 08:49:16.721923 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:16 crc kubenswrapper[4932]: I1125 08:49:16.721957 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:16 crc kubenswrapper[4932]: I1125 08:49:16.721969 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.700532 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.700659 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.700751 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.701734 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.701766 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.701775 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.705077 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.730501 4932 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.731325 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.731355 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.731364 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:20 crc kubenswrapper[4932]: E1125 08:49:20.739451 4932 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 25 08:49:20 crc kubenswrapper[4932]: E1125 08:49:20.809113 4932 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.815797 4932 trace.go:236] Trace[2113323591]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 08:49:07.699) (total time: 13115ms):
Nov 25 08:49:20 crc kubenswrapper[4932]: Trace[2113323591]: ---"Objects listed" error:<nil> 13115ms (08:49:20.815)
Nov 25 08:49:20 crc kubenswrapper[4932]: Trace[2113323591]: [13.115775791s] [13.115775791s] END
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.815833 4932 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.815843 4932 trace.go:236] Trace[1852539370]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 08:49:08.645) (total time: 12170ms):
Nov 25 08:49:20 crc kubenswrapper[4932]: Trace[1852539370]: ---"Objects listed" error:<nil> 12170ms (08:49:20.815)
Nov 25 08:49:20 crc kubenswrapper[4932]: Trace[1852539370]: [12.170315943s] [12.170315943s] END
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.815882 4932 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.815970 4932 trace.go:236] Trace[1599084024]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 08:49:08.386) (total time: 12429ms):
Nov 25 08:49:20 crc kubenswrapper[4932]: Trace[1599084024]: ---"Objects listed" error:<nil> 12429ms (08:49:20.815)
Nov 25 08:49:20 crc kubenswrapper[4932]: Trace[1599084024]: [12.429186502s] [12.429186502s] END
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.815988 4932 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.816278 4932 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.818643 4932 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.828717 4932 kubelet_node_status.go:115] "Node was previously registered" node="crc"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.828953 4932 kubelet_node_status.go:79] "Successfully registered node" node="crc"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.830031 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.830057 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.830066 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.830087 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.830099 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:20Z","lastTransitionTime":"2025-11-25T08:49:20Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?, CSINode is not yet initialized]"} Nov 25 08:49:20 crc kubenswrapper[4932]: E1125 08:49:20.845474 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"message\\\":\\\"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?, CSINode is not yet initialized]\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"si
zeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v
4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.850234 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.850278 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 
08:49:20.850286 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.850306 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.850318 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:20Z","lastTransitionTime":"2025-11-25T08:49:20Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 08:49:20 crc kubenswrapper[4932]: E1125 08:49:20.862411 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [status patch payload identical to the 08:49:20.845474 attempt] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.870131 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.870171 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.870181 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.870231 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.870245 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:20Z","lastTransitionTime":"2025-11-25T08:49:20Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 08:49:20 crc kubenswrapper[4932]: E1125 08:49:20.888530 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [status patch payload identical to the 08:49:20.845474 attempt] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.891968 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.892005 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.892014 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.892033 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.892044 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:20Z","lastTransitionTime":"2025-11-25T08:49:20Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"}
Nov 25 08:49:20 crc kubenswrapper[4932]: E1125 08:49:20.903395 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [status patch payload identical to the 08:49:20.845474 attempt] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.907737 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.907800 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
08:49:20.907815 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.907843 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.907859 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:20Z","lastTransitionTime":"2025-11-25T08:49:20Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"} Nov 25 08:49:20 crc kubenswrapper[4932]: E1125 08:49:20.917301 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:20Z\\\",\\\"message\\\":\\\"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?, CSINode is not yet initialized]\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"si
zeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v
4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:20 crc kubenswrapper[4932]: E1125 08:49:20.917467 4932 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.919261 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 
08:49:20.919305 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.919320 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.919351 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:20 crc kubenswrapper[4932]: I1125 08:49:20.919370 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:20Z","lastTransitionTime":"2025-11-25T08:49:20Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"} Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.021522 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.021573 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.021586 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.021612 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.021624 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:21Z","lastTransitionTime":"2025-11-25T08:49:21Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"} Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.123779 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.123820 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.123829 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.123848 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.123860 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:21Z","lastTransitionTime":"2025-11-25T08:49:21Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?, CSINode is not yet initialized]"} Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.225873 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.225909 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.225918 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.225935 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.225945 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:21Z","lastTransitionTime":"2025-11-25T08:49:21Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"} Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.328030 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.328105 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.328119 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.328148 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.328161 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:21Z","lastTransitionTime":"2025-11-25T08:49:21Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?, CSINode is not yet initialized]"} Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.430038 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.430087 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.430100 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.430122 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.430136 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:21Z","lastTransitionTime":"2025-11-25T08:49:21Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"} Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.532403 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.532443 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.532454 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.532475 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.532487 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:21Z","lastTransitionTime":"2025-11-25T08:49:21Z","reason":"KubeletNotReady","message":"[container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?, CSINode is not yet initialized]"} Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.623865 4932 apiserver.go:52] "Watching apiserver" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.626602 4932 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.626810 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-dns/node-resolver-pb6ll","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"] Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.627218 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.627268 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.627462 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.627596 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.627710 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.627505 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.627792 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.627497 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.627966 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.628043 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-pb6ll" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.629301 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.629606 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.629967 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.630084 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.630172 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.630362 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.630537 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.630731 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.630938 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.631275 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.631396 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.634183 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.635114 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.635286 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.635365 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.635474 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.635544 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:21Z","lastTransitionTime":"2025-11-25T08:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.644430 4932 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.647531 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.660860 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.675126 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.684083 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.716120 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.721169 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.721472 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.721576 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.721657 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.721752 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.721851 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.721944 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 
08:49:21.722044 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722130 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722233 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.721764 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722010 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722029 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722133 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722232 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722350 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722433 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722453 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722481 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722504 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722527 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722552 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722574 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722598 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722620 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722641 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722662 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722686 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722709 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722733 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722755 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722780 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722802 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722824 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722846 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722868 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722889 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722911 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722935 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722957 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722979 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722999 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723021 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723042 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723064 4932 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723085 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723116 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723142 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723164 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723209 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723233 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723256 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723279 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723302 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723325 4932 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723347 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723385 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723406 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723428 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723480 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723520 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723540 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723561 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723582 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723602 4932 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723624 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723647 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723670 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722499 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723692 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722665 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722667 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722697 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722753 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.722844 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723169 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723423 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723481 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723499 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723617 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723662 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723744 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723846 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723871 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723879 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723914 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723946 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723970 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.723996 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724020 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 08:49:21 crc 
kubenswrapper[4932]: I1125 08:49:21.724030 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724048 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724075 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724091 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724100 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724128 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724151 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724160 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724177 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724222 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724252 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724280 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724306 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724321 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724326 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724372 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724538 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). 
InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724330 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724585 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724609 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724632 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724658 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724681 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724706 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724729 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724751 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724773 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724793 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724814 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724833 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724863 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724883 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724907 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724929 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724949 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724972 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724993 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725012 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725034 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725055 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725076 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725096 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725116 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725138 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725157 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725177 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 08:49:21 crc 
kubenswrapper[4932]: I1125 08:49:21.725221 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725243 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725267 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725287 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725306 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725338 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725362 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725385 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725405 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725424 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 08:49:21 crc kubenswrapper[4932]: 
I1125 08:49:21.725446 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725469 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725490 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725510 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725531 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725551 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725572 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725595 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725619 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725640 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 
08:49:21.725663 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725686 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725708 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725728 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725749 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725770 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725792 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725812 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725833 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725857 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 08:49:21 crc 
kubenswrapper[4932]: I1125 08:49:21.725880 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725901 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725924 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725946 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725966 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.725986 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726011 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726034 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726056 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726077 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726099 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726122 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726155 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726180 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726229 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726253 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726277 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726300 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726321 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726344 4932 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726368 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726390 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726411 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726434 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726461 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726483 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726507 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726528 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726553 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 
08:49:21.726576 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726604 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726629 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726651 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726674 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726696 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726719 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726741 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726773 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726796 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 08:49:21 crc 
kubenswrapper[4932]: I1125 08:49:21.726820 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726843 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726865 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726972 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726997 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727021 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727043 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727066 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727088 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727110 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: 
\"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727134 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727157 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727181 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727695 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727749 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727774 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727799 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727856 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkqg4\" (UniqueName: \"kubernetes.io/projected/531c7937-727f-4ac5-9e26-0d7efacf93d3-kube-api-access-mkqg4\") pod \"node-resolver-pb6ll\" (UID: \"531c7937-727f-4ac5-9e26-0d7efacf93d3\") " pod="openshift-dns/node-resolver-pb6ll" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727890 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727917 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/531c7937-727f-4ac5-9e26-0d7efacf93d3-hosts-file\") pod \"node-resolver-pb6ll\" (UID: \"531c7937-727f-4ac5-9e26-0d7efacf93d3\") " pod="openshift-dns/node-resolver-pb6ll" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727946 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727970 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727992 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728018 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728045 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728069 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728096 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728120 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod 
\"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728143 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728164 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728206 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728230 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728251 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728332 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728350 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728365 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728381 4932 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728396 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" 
DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728409 4932 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728421 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728434 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728447 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728461 4932 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728473 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728487 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728500 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728514 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728526 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728539 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728552 4932 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728565 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728577 4932 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728591 4932 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728603 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728615 4932 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728628 4932 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728640 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728654 4932 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728667 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.728681 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724550 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724579 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724587 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724649 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.724783 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.726621 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.727033 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.729351 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.729415 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.730429 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.730518 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.730533 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.730671 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.730730 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.730887 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.731238 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.731669 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.731793 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.731872 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.732080 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.732097 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.732317 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.732468 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.732625 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.732796 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.734121 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.734250 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.734353 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.734645 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.734800 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.735199 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.736076 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.736083 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.736561 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.736667 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.736671 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.736902 4932 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.737125 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.737280 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.737771 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.737837 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.738115 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.739994 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.740385 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.741464 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.741513 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.741552 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.741849 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.741931 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.742009 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:49:22.241983858 +0000 UTC m=+22.368013421 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.742265 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.742354 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.742565 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.742616 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.742629 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.742852 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.742887 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.743549 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.744645 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.745237 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.747442 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.747569 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.748000 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.749025 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.749026 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.749241 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.749420 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.749589 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.750997 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.751204 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.751385 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.751689 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.751875 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.751897 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.751907 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.751924 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.751937 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:21Z","lastTransitionTime":"2025-11-25T08:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.752102 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.753156 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.753839 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.755261 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.755763 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.755959 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.756038 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.756146 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.756484 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.756884 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.756783 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.757160 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.757247 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.757464 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.757789 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.758321 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.758720 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.758998 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.759082 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.759323 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.759562 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.759932 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.760181 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.760215 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.757113 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.760377 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.760384 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.760334 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.760993 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.761014 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.761068 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.761174 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.761340 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.761409 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.761494 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.761659 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.761895 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.761900 4932 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.761926 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.761994 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:22.26197008 +0000 UTC m=+22.387999843 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.762243 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.762329 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.762358 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.762546 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.762594 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.762759 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.763041 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.763056 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.763103 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.763159 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.763354 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.763428 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.763624 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.763878 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.763982 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.764375 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.764481 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.764880 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.764965 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.765021 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.765118 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.765296 4932 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.765335 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.765356 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:22.265337888 +0000 UTC m=+22.391367451 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.765380 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.765575 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.765650 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.765794 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.766055 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.766257 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.766397 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.766838 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.767503 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.767542 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.767657 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.767939 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.767967 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.768313 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.768525 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.768866 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.768994 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.769053 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.770105 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.770338 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.771638 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.771712 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.772090 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.772131 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.772533 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.773160 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.773460 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.773497 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.773558 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.773725 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.773912 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.773937 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.774466 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.774912 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.777655 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.785907 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.786149 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). 
InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.786595 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.786669 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.788642 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.788666 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.788681 4932 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.788748 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:22.288726958 +0000 UTC m=+22.414756521 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.789056 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.789087 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.789112 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.789122 4932 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:21 crc kubenswrapper[4932]: E1125 08:49:21.789162 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:22.289151129 +0000 UTC m=+22.415180692 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.792052 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.792490 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.795301 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.797110 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.797890 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded 
for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.799569 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.803597 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.814027 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.818708 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.829775 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/531c7937-727f-4ac5-9e26-0d7efacf93d3-hosts-file\") pod \"node-resolver-pb6ll\" (UID: \"531c7937-727f-4ac5-9e26-0d7efacf93d3\") " pod="openshift-dns/node-resolver-pb6ll"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.829832 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkqg4\" (UniqueName: \"kubernetes.io/projected/531c7937-727f-4ac5-9e26-0d7efacf93d3-kube-api-access-mkqg4\") pod \"node-resolver-pb6ll\" (UID: \"531c7937-727f-4ac5-9e26-0d7efacf93d3\") " pod="openshift-dns/node-resolver-pb6ll"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.829862 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.829881 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830014 4932 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830034 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830046 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830061 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830073 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830085 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830095 4932 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830106 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830117 4932 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830128 4932 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830139 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830150 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830161 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830175 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830234 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830249 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830260 4932 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830280 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830291 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830302 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830312 4932 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830323 4932 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830334 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830346 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830356 4932 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830357 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/531c7937-727f-4ac5-9e26-0d7efacf93d3-hosts-file\") pod \"node-resolver-pb6ll\" (UID: \"531c7937-727f-4ac5-9e26-0d7efacf93d3\") " pod="openshift-dns/node-resolver-pb6ll"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830412 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830462 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830368 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830485 4932 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830498 4932 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830511 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830523 4932 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830536 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830548 4932 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830560 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830571 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830583 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830594 4932 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830605 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830617 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830629 4932 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830640 4932 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830651 4932 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830663 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830673 4932 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830684 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830694 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830705 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830717 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830728 4932 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830739 4932 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830750 4932 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830760 4932 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830773 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830785 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830796 4932 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830807 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830817 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830828 4932 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830838 4932 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830850 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830862 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830872 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830883 4932 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830894 4932 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830905 4932 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830916 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830927 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830939 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830950 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830963 4932 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830976 4932 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830987 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.830997 4932 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831007 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831017 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831028 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831039 4932 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831050 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831060 4932 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831072 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831084 4932 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831094 4932 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831104 4932 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831114 4932 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831124 4932 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831135 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831146 4932 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831158 4932 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831170 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831183 4932 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831210 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831222 4932 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831233 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831245 4932 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831255 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831266 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831277 4932 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831287 4932 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831298 4932 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831309 4932 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831319 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831330 4932 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831341 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831352 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831362 4932 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831372 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831383 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831393 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831404 4932 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831414 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831425 4932 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831436 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831447 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831457 4932 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831469 4932 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831479 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831489 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831500 4932 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831510 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831521 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831532 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831543 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831556 4932 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831568 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831580 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831592 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831603 4932 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831613 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831624 4932 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831634 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831645 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831656 4932 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831693 4932 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831705 4932 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831716 4932 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831728 4932 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831741 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831752 4932 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831765 4932 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831777 4932 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831788 4932 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831799 4932 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831811 4932 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831824 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831836 4932 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831848 4932 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831859 4932 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831870 4932 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831882 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831895 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831906 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831918 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831929 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831942 4932 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831953 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831965 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831978 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.831991 4932 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.832002 4932 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.832033 4932 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.832044 4932 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.832056 4932 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.832067 4932 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.832078 4932 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.832089 4932 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.832100 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.832111 4932 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.832123 4932 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.856239 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkqg4\" (UniqueName: \"kubernetes.io/projected/531c7937-727f-4ac5-9e26-0d7efacf93d3-kube-api-access-mkqg4\") pod \"node-resolver-pb6ll\" (UID: \"531c7937-727f-4ac5-9e26-0d7efacf93d3\") " pod="openshift-dns/node-resolver-pb6ll"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.858578 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.858618 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.858631 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.858648 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.858660 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:21Z","lastTransitionTime":"2025-11-25T08:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.949101 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.956485 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.960855 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.960907 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.960918 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.960935 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.960947 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:21Z","lastTransitionTime":"2025-11-25T08:49:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.962578 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 25 08:49:21 crc kubenswrapper[4932]: W1125 08:49:21.963103 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-c0b652acf30896c594a984456297e50961f584343c60c261573a327b89f73a42 WatchSource:0}: Error finding container c0b652acf30896c594a984456297e50961f584343c60c261573a327b89f73a42: Status 404 returned error can't find the container with id c0b652acf30896c594a984456297e50961f584343c60c261573a327b89f73a42
Nov 25 08:49:21 crc kubenswrapper[4932]: I1125 08:49:21.968084 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-pb6ll"
Nov 25 08:49:21 crc kubenswrapper[4932]: W1125 08:49:21.979698 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-27530b0c9b76fb8fe9d0d6462fa7f2feb345f28fe61744a8b1f2f2058b3928cc WatchSource:0}: Error finding container 27530b0c9b76fb8fe9d0d6462fa7f2feb345f28fe61744a8b1f2f2058b3928cc: Status 404 returned error can't find the container with id 27530b0c9b76fb8fe9d0d6462fa7f2feb345f28fe61744a8b1f2f2058b3928cc
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.065273 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.065685 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.065697 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.065711 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.065722 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:22Z","lastTransitionTime":"2025-11-25T08:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.181175 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.181215 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.181223 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.181235 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.181246 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:22Z","lastTransitionTime":"2025-11-25T08:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.282419 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.289181 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.289273 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.289291 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.289317 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.289337 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:22Z","lastTransitionTime":"2025-11-25T08:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.296812 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.297436 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.309747 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.325959 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.341807 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.341908 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.341971 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:49:23.341941074 +0000 UTC m=+23.467970637 (durationBeforeRetry 1s).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.342010 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.342054 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.342072 4932 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.342092 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.342121 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:23.342112278 +0000 UTC m=+23.468141831 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.342220 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.342232 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.342240 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.342233 4932 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.342365 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:23.342335724 +0000 UTC m=+23.468365437 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.342248 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.342412 4932 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.342465 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:23.342456937 +0000 UTC m=+23.468486710 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.342256 4932 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.342510 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:23.342502609 +0000 UTC m=+23.468532382 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.346066 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.360608 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-plbqh"] Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.361036 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.363620 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.364707 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.364767 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.365447 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.365769 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.365769 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.381995 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.399495 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.399539 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.399549 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.399565 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.399575 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:22Z","lastTransitionTime":"2025-11-25T08:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.400431 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.422760 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.436515 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.442800 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/fc52f208-3635-4b33-a1f2-720bcff56064-rootfs\") pod \"machine-config-daemon-plbqh\" (UID: \"fc52f208-3635-4b33-a1f2-720bcff56064\") " pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.442853 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fc52f208-3635-4b33-a1f2-720bcff56064-proxy-tls\") pod \"machine-config-daemon-plbqh\" (UID: \"fc52f208-3635-4b33-a1f2-720bcff56064\") " pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.442885 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fc52f208-3635-4b33-a1f2-720bcff56064-mcd-auth-proxy-config\") pod \"machine-config-daemon-plbqh\" (UID: \"fc52f208-3635-4b33-a1f2-720bcff56064\") " pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.442910 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpmnl\" (UniqueName: \"kubernetes.io/projected/fc52f208-3635-4b33-a1f2-720bcff56064-kube-api-access-mpmnl\") pod \"machine-config-daemon-plbqh\" (UID: \"fc52f208-3635-4b33-a1f2-720bcff56064\") " pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.449292 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.460723 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.472779 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.485890 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.500655 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.502092 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.502154 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.502166 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.502213 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.502227 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:22Z","lastTransitionTime":"2025-11-25T08:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.512717 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35
825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.523208 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.533559 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.543743 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fc52f208-3635-4b33-a1f2-720bcff56064-proxy-tls\") pod \"machine-config-daemon-plbqh\" (UID: \"fc52f208-3635-4b33-a1f2-720bcff56064\") " pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.543803 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fc52f208-3635-4b33-a1f2-720bcff56064-mcd-auth-proxy-config\") pod \"machine-config-daemon-plbqh\" (UID: \"fc52f208-3635-4b33-a1f2-720bcff56064\") " pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.543827 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpmnl\" (UniqueName: \"kubernetes.io/projected/fc52f208-3635-4b33-a1f2-720bcff56064-kube-api-access-mpmnl\") pod \"machine-config-daemon-plbqh\" (UID: \"fc52f208-3635-4b33-a1f2-720bcff56064\") " pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.543871 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/fc52f208-3635-4b33-a1f2-720bcff56064-rootfs\") pod \"machine-config-daemon-plbqh\" (UID: \"fc52f208-3635-4b33-a1f2-720bcff56064\") " pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.543956 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/fc52f208-3635-4b33-a1f2-720bcff56064-rootfs\") pod \"machine-config-daemon-plbqh\" (UID: \"fc52f208-3635-4b33-a1f2-720bcff56064\") " pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 
08:49:22.544869 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fc52f208-3635-4b33-a1f2-720bcff56064-mcd-auth-proxy-config\") pod \"machine-config-daemon-plbqh\" (UID: \"fc52f208-3635-4b33-a1f2-720bcff56064\") " pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.548646 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fc52f208-3635-4b33-a1f2-720bcff56064-proxy-tls\") pod \"machine-config-daemon-plbqh\" (UID: \"fc52f208-3635-4b33-a1f2-720bcff56064\") " pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.553208 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.563622 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.604693 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.604732 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.604745 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.604759 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.604772 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:22Z","lastTransitionTime":"2025-11-25T08:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.608943 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.609599 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.611226 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.612063 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.613293 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.613892 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.614539 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.617153 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.617941 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.619121 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 25 
08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.619832 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.621162 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.621777 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.622448 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.623635 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.624235 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.626910 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.627354 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.628055 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.629106 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.629674 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.630799 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.631311 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.632643 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 
08:49:22.633110 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.633834 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.635137 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.635843 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.636942 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.637511 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.638868 4932 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.639004 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.641141 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.642374 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.642877 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.644836 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.645609 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.646663 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 
08:49:22.647492 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.648613 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.649166 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.650237 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.650960 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.652320 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.652953 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.653924 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.654462 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.655751 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.656335 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.657353 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.657839 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.658843 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.659599 4932 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.660212 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.662459 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpmnl\" (UniqueName: \"kubernetes.io/projected/fc52f208-3635-4b33-a1f2-720bcff56064-kube-api-access-mpmnl\") pod \"machine-config-daemon-plbqh\" (UID: \"fc52f208-3635-4b33-a1f2-720bcff56064\") " pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.676357 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:49:22 crc kubenswrapper[4932]: W1125 08:49:22.689834 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfc52f208_3635_4b33_a1f2_720bcff56064.slice/crio-3fe5cad291b52b2bf8ba6788f4dbf714839534b35bb132ea83afa7c006afa5d0 WatchSource:0}: Error finding container 3fe5cad291b52b2bf8ba6788f4dbf714839534b35bb132ea83afa7c006afa5d0: Status 404 returned error can't find the container with id 3fe5cad291b52b2bf8ba6788f4dbf714839534b35bb132ea83afa7c006afa5d0 Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.710099 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.710146 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.710156 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.710174 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.710199 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:22Z","lastTransitionTime":"2025-11-25T08:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.751261 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-jmvtb"] Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.752223 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-kvhb4"] Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.752364 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.752608 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: W1125 08:49:22.753972 4932 reflector.go:561] object-"openshift-multus"/"cni-copy-resources": failed to list *v1.ConfigMap: configmaps "cni-copy-resources" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Nov 25 08:49:22 crc kubenswrapper[4932]: W1125 08:49:22.753990 4932 reflector.go:561] object-"openshift-multus"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.754021 4932 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"cni-copy-resources\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"cni-copy-resources\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.754032 4932 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 08:49:22 crc kubenswrapper[4932]: W1125 08:49:22.753990 4932 reflector.go:561] object-"openshift-multus"/"default-cni-sysctl-allowlist": failed to list *v1.ConfigMap: configmaps "default-cni-sysctl-allowlist" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.754071 4932 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"default-cni-sysctl-allowlist\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"default-cni-sysctl-allowlist\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 08:49:22 crc kubenswrapper[4932]: W1125 08:49:22.754311 4932 reflector.go:561] object-"openshift-multus"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.754334 4932 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 08:49:22 crc kubenswrapper[4932]: W1125 
08:49:22.755717 4932 reflector.go:561] object-"openshift-multus"/"default-dockercfg-2q5b6": failed to list *v1.Secret: secrets "default-dockercfg-2q5b6" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.755785 4932 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"default-dockercfg-2q5b6\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"default-dockercfg-2q5b6\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 08:49:22 crc kubenswrapper[4932]: W1125 08:49:22.755842 4932 reflector.go:561] object-"openshift-multus"/"multus-daemon-config": failed to list *v1.ConfigMap: configmaps "multus-daemon-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-multus": no relationship found between node 'crc' and this object Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.755857 4932 reflector.go:158] "Unhandled Error" err="object-\"openshift-multus\"/\"multus-daemon-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"multus-daemon-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-multus\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.757891 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"3fe5cad291b52b2bf8ba6788f4dbf714839534b35bb132ea83afa7c006afa5d0"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.761116 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.762246 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.762302 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.762317 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"27530b0c9b76fb8fe9d0d6462fa7f2feb345f28fe61744a8b1f2f2058b3928cc"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.764285 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4"} Nov 25 08:49:22 crc 
kubenswrapper[4932]: I1125 08:49:22.764331 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"a4034a497a63340f114528f648a8022d42a2fa4a5a17dbc3254f8319fc9d2023"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.778149 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.779690 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-pb6ll" event={"ID":"531c7937-727f-4ac5-9e26-0d7efacf93d3","Type":"ContainerStarted","Data":"7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.779740 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-pb6ll" event={"ID":"531c7937-727f-4ac5-9e26-0d7efacf93d3","Type":"ContainerStarted","Data":"76d14b1b0a6510b6fda60abce01fb4688e27d352716a4434112f430eeb41056c"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.784711 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c0b652acf30896c594a984456297e50961f584343c60c261573a327b89f73a42"} Nov 25 08:49:22 crc kubenswrapper[4932]: E1125 08:49:22.798273 4932 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.816497 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.817167 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.817294 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.817362 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.817437 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.817501 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:22Z","lastTransitionTime":"2025-11-25T08:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.847235 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.847954 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-run-multus-certs\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.847997 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-run-netns\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848023 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zlq2\" (UniqueName: \"kubernetes.io/projected/343dd8b2-7428-4b00-9c0a-00f728022d6d-kube-api-access-5zlq2\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848064 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-etc-kubernetes\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848106 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/343dd8b2-7428-4b00-9c0a-00f728022d6d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848130 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-var-lib-kubelet\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848153 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-var-lib-cni-multus\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848211 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-cnibin\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848232 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-multus-conf-dir\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848253 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/199dbdf9-e2fc-459e-9e17-f5d520309f0a-multus-daemon-config\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848273 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/343dd8b2-7428-4b00-9c0a-00f728022d6d-cni-binary-copy\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848327 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbfbw\" (UniqueName: \"kubernetes.io/projected/199dbdf9-e2fc-459e-9e17-f5d520309f0a-kube-api-access-pbfbw\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848359 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-multus-socket-dir-parent\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848383 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-var-lib-cni-bin\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848407 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-hostroot\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848434 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-run-k8s-cni-cncf-io\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848457 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/343dd8b2-7428-4b00-9c0a-00f728022d6d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848504 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-multus-cni-dir\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848535 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/343dd8b2-7428-4b00-9c0a-00f728022d6d-cnibin\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848558 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/343dd8b2-7428-4b00-9c0a-00f728022d6d-os-release\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848580 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-system-cni-dir\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848603 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-os-release\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848623 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/199dbdf9-e2fc-459e-9e17-f5d520309f0a-cni-binary-copy\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.848647 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/343dd8b2-7428-4b00-9c0a-00f728022d6d-system-cni-dir\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " 
pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.876079 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.898276 4932 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entryp
oint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reas
on\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.920119 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.920153 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.920165 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.920181 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.920205 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:22Z","lastTransitionTime":"2025-11-25T08:49:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.930335 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.949964 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/343dd8b2-7428-4b00-9c0a-00f728022d6d-cnibin\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950012 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/343dd8b2-7428-4b00-9c0a-00f728022d6d-os-release\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 
08:49:22.950045 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-system-cni-dir\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950064 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-os-release\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950085 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/199dbdf9-e2fc-459e-9e17-f5d520309f0a-cni-binary-copy\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950106 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/343dd8b2-7428-4b00-9c0a-00f728022d6d-system-cni-dir\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950126 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-run-netns\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950148 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-run-multus-certs\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950173 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zlq2\" (UniqueName: \"kubernetes.io/projected/343dd8b2-7428-4b00-9c0a-00f728022d6d-kube-api-access-5zlq2\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950225 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-etc-kubernetes\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950276 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/343dd8b2-7428-4b00-9c0a-00f728022d6d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950298 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-var-lib-kubelet\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950328 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-var-lib-cni-multus\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950350 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-cnibin\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950370 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-multus-conf-dir\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950391 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/199dbdf9-e2fc-459e-9e17-f5d520309f0a-multus-daemon-config\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950412 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/343dd8b2-7428-4b00-9c0a-00f728022d6d-cni-binary-copy\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950435 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbfbw\" (UniqueName: \"kubernetes.io/projected/199dbdf9-e2fc-459e-9e17-f5d520309f0a-kube-api-access-pbfbw\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950455 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-hostroot\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950479 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-multus-socket-dir-parent\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950500 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-var-lib-cni-bin\") pod \"multus-kvhb4\" (UID: 
\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950537 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-run-k8s-cni-cncf-io\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950559 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/343dd8b2-7428-4b00-9c0a-00f728022d6d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950580 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-multus-cni-dir\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950809 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-multus-cni-dir\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950859 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/343dd8b2-7428-4b00-9c0a-00f728022d6d-cnibin\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950914 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/343dd8b2-7428-4b00-9c0a-00f728022d6d-os-release\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.950968 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-system-cni-dir\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951014 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-os-release\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951094 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/343dd8b2-7428-4b00-9c0a-00f728022d6d-system-cni-dir\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951123 
4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-run-netns\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951151 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-run-multus-certs\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951454 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-etc-kubernetes\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951701 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-cnibin\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951701 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-var-lib-cni-bin\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951757 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-multus-socket-dir-parent\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951773 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-var-lib-kubelet\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951801 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-var-lib-cni-multus\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951775 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951867 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-multus-conf-dir\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951872 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-host-run-k8s-cni-cncf-io\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.951870 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/199dbdf9-e2fc-459e-9e17-f5d520309f0a-hostroot\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.952103 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/343dd8b2-7428-4b00-9c0a-00f728022d6d-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:22 crc kubenswrapper[4932]: I1125 08:49:22.982788 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.002127 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.022845 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.022888 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.022898 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.022914 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.022924 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:23Z","lastTransitionTime":"2025-11-25T08:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.023392 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.041791 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.054621 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.068055 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.077728 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.087794 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\
"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.101642 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec
3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.113708 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.125738 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.125776 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.125786 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.125801 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.125813 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:23Z","lastTransitionTime":"2025-11-25T08:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.129675 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.131239 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rlhks"] Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.134736 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.139389 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.139646 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.139776 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.139923 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.139792 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.140573 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.140788 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.154535 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.168850 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.185380 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.203089 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.217631 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.228385 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.228429 4932 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.228439 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.228456 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.228469 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:23Z","lastTransitionTime":"2025-11-25T08:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.229008 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.245383 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.252827 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-run-netns\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.252975 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-run-ovn-kubernetes\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.253056 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2h2l2\" (UniqueName: \"kubernetes.io/projected/24f5eec6-6332-4bae-bce3-4faa1156c249-kube-api-access-2h2l2\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.253144 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-openvswitch\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.253241 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-slash\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.253333 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-var-lib-openvswitch\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.253442 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.253568 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-systemd-units\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.253722 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-cni-netd\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.253845 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-node-log\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.253982 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-systemd\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.254116 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-kubelet\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.254236 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-env-overrides\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.254369 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-ovn\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.254468 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-ovnkube-config\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.254622 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-cni-bin\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.254712 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-ovnkube-script-lib\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.254749 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-log-socket\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.254778 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/24f5eec6-6332-4bae-bce3-4faa1156c249-ovn-node-metrics-cert\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.254817 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-etc-openvswitch\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.258688 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.275338 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.290061 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.305542 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.324464 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.330882 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.330934 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.330946 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.330964 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.330975 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:23Z","lastTransitionTime":"2025-11-25T08:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.347400 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.355528 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 
25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.355640 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.355663 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-systemd\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.355678 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-node-log\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.355706 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-kubelet\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.355735 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:49:25.355707175 +0000 UTC m=+25.481736878 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.355751 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-kubelet\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.355779 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-env-overrides\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.355810 4932 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.355833 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-ovn\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.355859 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:25.355839148 +0000 UTC m=+25.481868701 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.355889 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-ovnkube-config\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.355935 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-cni-bin\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.355979 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-ovnkube-script-lib\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356010 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-log-socket\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356040 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/24f5eec6-6332-4bae-bce3-4faa1156c249-ovn-node-metrics-cert\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356090 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356116 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-etc-openvswitch\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356139 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-systemd\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356140 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-run-ovn-kubernetes\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356201 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2h2l2\" (UniqueName: \"kubernetes.io/projected/24f5eec6-6332-4bae-bce3-4faa1156c249-kube-api-access-2h2l2\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356206 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-run-ovn-kubernetes\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356221 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-run-netns\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356249 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-openvswitch\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356265 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-var-lib-openvswitch\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356281 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356303 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-slash\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356321 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-systemd-units\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356339 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356358 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356374 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-cni-netd\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356441 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-cni-netd\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356481 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-node-log\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356707 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-run-netns\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356744 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-openvswitch\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356771 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-var-lib-openvswitch\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356805 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356832 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-slash\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356855 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-systemd-units\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.356910 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.356923 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356922 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-env-overrides\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.356933 4932 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.356960 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:25.356952927 +0000 UTC m=+25.482982490 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.356975 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-log-socket\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.357002 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.357013 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.357020 4932 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.357038 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:25.357032799 +0000 UTC m=+25.483062362 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.357064 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-ovn\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.357090 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-cni-bin\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.357136 4932 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.357166 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:25.357154653 +0000 UTC m=+25.483184426 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.357214 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-etc-openvswitch\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.357252 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-ovnkube-script-lib\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.357559 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-ovnkube-config\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.363668 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.365163 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/24f5eec6-6332-4bae-bce3-4faa1156c249-ovn-node-metrics-cert\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.376548 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.378610 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2h2l2\" (UniqueName: \"kubernetes.io/projected/24f5eec6-6332-4bae-bce3-4faa1156c249-kube-api-access-2h2l2\") pod \"ovnkube-node-rlhks\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.391314 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.402907 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.414476 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.433431 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.433475 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.433484 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.433503 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.433516 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:23Z","lastTransitionTime":"2025-11-25T08:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.448387 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:23 crc kubenswrapper[4932]: W1125 08:49:23.462166 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24f5eec6_6332_4bae_bce3_4faa1156c249.slice/crio-7d41e285bee6cb58a899bfc23d9145c2061aba51c0d0a0adb738007d0f29c5f8 WatchSource:0}: Error finding container 7d41e285bee6cb58a899bfc23d9145c2061aba51c0d0a0adb738007d0f29c5f8: Status 404 returned error can't find the container with id 7d41e285bee6cb58a899bfc23d9145c2061aba51c0d0a0adb738007d0f29c5f8
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.539815 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.539846 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.539854 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.539869 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.539881 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:23Z","lastTransitionTime":"2025-11-25T08:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.604986 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.605113 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.605167 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.605246 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.605310 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.605363 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.642976 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.643031 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.643040 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.643062 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.643074 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:23Z","lastTransitionTime":"2025-11-25T08:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.746528 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.746558 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.746567 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.746580 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.746590 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:23Z","lastTransitionTime":"2025-11-25T08:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.790711 4932 generic.go:334] "Generic (PLEG): container finished" podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136" exitCode=0
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.790809 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136"}
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.790887 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"7d41e285bee6cb58a899bfc23d9145c2061aba51c0d0a0adb738007d0f29c5f8"}
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.795235 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f"}
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.795297 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177"}
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.810037 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.824039 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.841757 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.851129 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.852076 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.852115 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.852124 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.852140 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.852149 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:23Z","lastTransitionTime":"2025-11-25T08:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.855445 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.872904 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.881958 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.898198 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.899646 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.902614 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/199dbdf9-e2fc-459e-9e17-f5d520309f0a-multus-daemon-config\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4"
Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.923845 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.937152 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.948924 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.951421 4932 configmap.go:193] Couldn't get configMap openshift-multus/cni-copy-resources: failed to sync configmap cache: timed out waiting for the condition Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.951506 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/199dbdf9-e2fc-459e-9e17-f5d520309f0a-cni-binary-copy podName:199dbdf9-e2fc-459e-9e17-f5d520309f0a nodeName:}" failed. No retries permitted until 2025-11-25 08:49:24.451481252 +0000 UTC m=+24.577510815 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cni-binary-copy" (UniqueName: "kubernetes.io/configmap/199dbdf9-e2fc-459e-9e17-f5d520309f0a-cni-binary-copy") pod "multus-kvhb4" (UID: "199dbdf9-e2fc-459e-9e17-f5d520309f0a") : failed to sync configmap cache: timed out waiting for the condition Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.951975 4932 configmap.go:193] Couldn't get configMap openshift-multus/default-cni-sysctl-allowlist: failed to sync configmap cache: timed out waiting for the condition Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.952026 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/343dd8b2-7428-4b00-9c0a-00f728022d6d-cni-sysctl-allowlist podName:343dd8b2-7428-4b00-9c0a-00f728022d6d nodeName:}" failed. No retries permitted until 2025-11-25 08:49:24.452016516 +0000 UTC m=+24.578046079 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cni-sysctl-allowlist" (UniqueName: "kubernetes.io/configmap/343dd8b2-7428-4b00-9c0a-00f728022d6d-cni-sysctl-allowlist") pod "multus-additional-cni-plugins-jmvtb" (UID: "343dd8b2-7428-4b00-9c0a-00f728022d6d") : failed to sync configmap cache: timed out waiting for the condition Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.952140 4932 configmap.go:193] Couldn't get configMap openshift-multus/cni-copy-resources: failed to sync configmap cache: timed out waiting for the condition Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.952340 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/343dd8b2-7428-4b00-9c0a-00f728022d6d-cni-binary-copy podName:343dd8b2-7428-4b00-9c0a-00f728022d6d nodeName:}" failed. No retries permitted until 2025-11-25 08:49:24.452312503 +0000 UTC m=+24.578342286 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cni-binary-copy" (UniqueName: "kubernetes.io/configmap/343dd8b2-7428-4b00-9c0a-00f728022d6d-cni-binary-copy") pod "multus-additional-cni-plugins-jmvtb" (UID: "343dd8b2-7428-4b00-9c0a-00f728022d6d") : failed to sync configmap cache: timed out waiting for the condition Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.954067 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.954112 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.954125 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.954141 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.954151 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:23Z","lastTransitionTime":"2025-11-25T08:49:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.965667 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.967812 4932 projected.go:288] Couldn't get configMap openshift-multus/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.967883 4932 projected.go:194] Error preparing data for projected volume kube-api-access-pbfbw for pod openshift-multus/multus-kvhb4: failed to sync configmap cache: timed out waiting for the condition Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.967959 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/199dbdf9-e2fc-459e-9e17-f5d520309f0a-kube-api-access-pbfbw podName:199dbdf9-e2fc-459e-9e17-f5d520309f0a nodeName:}" failed. No retries permitted until 2025-11-25 08:49:24.467931341 +0000 UTC m=+24.593960904 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-pbfbw" (UniqueName: "kubernetes.io/projected/199dbdf9-e2fc-459e-9e17-f5d520309f0a-kube-api-access-pbfbw") pod "multus-kvhb4" (UID: "199dbdf9-e2fc-459e-9e17-f5d520309f0a") : failed to sync configmap cache: timed out waiting for the condition Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.973662 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.976703 4932 projected.go:288] Couldn't get configMap openshift-multus/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.976786 4932 projected.go:194] Error preparing data for projected volume kube-api-access-5zlq2 for pod openshift-multus/multus-additional-cni-plugins-jmvtb: failed to sync configmap cache: timed out waiting for the condition Nov 25 08:49:23 crc kubenswrapper[4932]: E1125 08:49:23.976872 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/343dd8b2-7428-4b00-9c0a-00f728022d6d-kube-api-access-5zlq2 podName:343dd8b2-7428-4b00-9c0a-00f728022d6d nodeName:}" failed. No retries permitted until 2025-11-25 08:49:24.476831764 +0000 UTC m=+24.602861507 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-5zlq2" (UniqueName: "kubernetes.io/projected/343dd8b2-7428-4b00-9c0a-00f728022d6d-kube-api-access-5zlq2") pod "multus-additional-cni-plugins-jmvtb" (UID: "343dd8b2-7428-4b00-9c0a-00f728022d6d") : failed to sync configmap cache: timed out waiting for the condition Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.978623 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 08:49:23 crc kubenswrapper[4932]: I1125 08:49:23.989038 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.003068 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.023585 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller 
ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath
\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin
\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.029488 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.042272 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.045268 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.050133 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.056939 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.056965 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.056974 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:24 crc 
kubenswrapper[4932]: I1125 08:49:24.056987 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.056997 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:24Z","lastTransitionTime":"2025-11-25T08:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.057136 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.070288 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.084297 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.097927 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.111470 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.130362 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.147046 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.159688 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.159721 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.159732 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.159749 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.159762 4932 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:24Z","lastTransitionTime":"2025-11-25T08:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.159875 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.202588 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.227474 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.248611 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.250540 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.258423 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.268376 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.268410 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.268426 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.268442 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.268452 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:24Z","lastTransitionTime":"2025-11-25T08:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.298888 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.327205 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.341869 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.357662 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.370816 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.370858 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.370871 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.370888 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.370899 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:24Z","lastTransitionTime":"2025-11-25T08:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.393798 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.440011 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.470429 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/343dd8b2-7428-4b00-9c0a-00f728022d6d-cni-binary-copy\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.470494 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbfbw\" (UniqueName: \"kubernetes.io/projected/199dbdf9-e2fc-459e-9e17-f5d520309f0a-kube-api-access-pbfbw\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:24 crc 
kubenswrapper[4932]: I1125 08:49:24.470525 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/343dd8b2-7428-4b00-9c0a-00f728022d6d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.470567 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/199dbdf9-e2fc-459e-9e17-f5d520309f0a-cni-binary-copy\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.471466 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/199dbdf9-e2fc-459e-9e17-f5d520309f0a-cni-binary-copy\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.471704 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/343dd8b2-7428-4b00-9c0a-00f728022d6d-cni-binary-copy\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.471765 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/343dd8b2-7428-4b00-9c0a-00f728022d6d-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.475921 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbfbw\" (UniqueName: \"kubernetes.io/projected/199dbdf9-e2fc-459e-9e17-f5d520309f0a-kube-api-access-pbfbw\") pod \"multus-kvhb4\" (UID: \"199dbdf9-e2fc-459e-9e17-f5d520309f0a\") " pod="openshift-multus/multus-kvhb4" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.479408 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.479441 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.479453 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.479471 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.479485 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:24Z","lastTransitionTime":"2025-11-25T08:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.487523 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.512809 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.553870 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.571901 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zlq2\" (UniqueName: \"kubernetes.io/projected/343dd8b2-7428-4b00-9c0a-00f728022d6d-kube-api-access-5zlq2\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.575148 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-5zlq2\" (UniqueName: \"kubernetes.io/projected/343dd8b2-7428-4b00-9c0a-00f728022d6d-kube-api-access-5zlq2\") pod \"multus-additional-cni-plugins-jmvtb\" (UID: \"343dd8b2-7428-4b00-9c0a-00f728022d6d\") " pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.581363 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.581397 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.581407 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.581426 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.581435 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:24Z","lastTransitionTime":"2025-11-25T08:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.600312 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce6
24c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/ope
nshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.609589 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.617120 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-kvhb4" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.640072 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"rea
dy\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: W1125 08:49:24.644541 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod199dbdf9_e2fc_459e_9e17_f5d520309f0a.slice/crio-995d053ba431aa8d92bdc3c9c52ae13823c922f5a98c78b7d6a71cfe573a4e1d WatchSource:0}: Error finding container 995d053ba431aa8d92bdc3c9c52ae13823c922f5a98c78b7d6a71cfe573a4e1d: Status 404 returned error can't find the container with id 995d053ba431aa8d92bdc3c9c52ae13823c922f5a98c78b7d6a71cfe573a4e1d Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.681774 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.683622 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.683642 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.683650 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.683662 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.683671 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:24Z","lastTransitionTime":"2025-11-25T08:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.721144 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.762044 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.786572 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.786634 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.786646 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.786663 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.786713 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:24Z","lastTransitionTime":"2025-11-25T08:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.800993 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.803227 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" event={"ID":"343dd8b2-7428-4b00-9c0a-00f728022d6d","Type":"ContainerStarted","Data":"b9f5bd43f7a5347edea4484a52b918452b6ac9f2d497b4e4b051631c4cf4163f"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.804301 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kvhb4" event={"ID":"199dbdf9-e2fc-459e-9e17-f5d520309f0a","Type":"ContainerStarted","Data":"995d053ba431aa8d92bdc3c9c52ae13823c922f5a98c78b7d6a71cfe573a4e1d"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.808091 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.808535 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.808551 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.821002 
4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{
\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-d
ev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd
47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.840793 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.876234 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.889702 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.889734 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.889743 4932 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.889779 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.889788 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:24Z","lastTransitionTime":"2025-11-25T08:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.915310 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.955241 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.991714 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.991750 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.991759 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.991773 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.991783 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:24Z","lastTransitionTime":"2025-11-25T08:49:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:24 crc kubenswrapper[4932]: I1125 08:49:24.995702 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"o
vnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.037685 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.066238 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-8jl2g"] Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.066837 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-8jl2g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.074999 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.086502 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.094549 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.094591 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.094603 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.094622 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.094635 4932 setters.go:603] 
"Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:25Z","lastTransitionTime":"2025-11-25T08:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.106284 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.125520 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.146591 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.179431 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1c51936d-6aa7-4dcc-b09e-9a5211e49cb3-host\") pod \"node-ca-8jl2g\" (UID: \"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\") " pod="openshift-image-registry/node-ca-8jl2g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.179565 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmp5l\" (UniqueName: \"kubernetes.io/projected/1c51936d-6aa7-4dcc-b09e-9a5211e49cb3-kube-api-access-zmp5l\") pod \"node-ca-8jl2g\" (UID: \"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\") " pod="openshift-image-registry/node-ca-8jl2g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.179679 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/1c51936d-6aa7-4dcc-b09e-9a5211e49cb3-serviceca\") pod \"node-ca-8jl2g\" (UID: \"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\") " pod="openshift-image-registry/node-ca-8jl2g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.194408 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.196745 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.196780 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.196794 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.196810 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.196821 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:25Z","lastTransitionTime":"2025-11-25T08:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.235939 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.280497 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/1c51936d-6aa7-4dcc-b09e-9a5211e49cb3-serviceca\") pod \"node-ca-8jl2g\" (UID: \"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\") " pod="openshift-image-registry/node-ca-8jl2g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.280555 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1c51936d-6aa7-4dcc-b09e-9a5211e49cb3-host\") pod \"node-ca-8jl2g\" (UID: \"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\") " pod="openshift-image-registry/node-ca-8jl2g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.280592 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-zmp5l\" (UniqueName: \"kubernetes.io/projected/1c51936d-6aa7-4dcc-b09e-9a5211e49cb3-kube-api-access-zmp5l\") pod \"node-ca-8jl2g\" (UID: \"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\") " pod="openshift-image-registry/node-ca-8jl2g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.280780 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/1c51936d-6aa7-4dcc-b09e-9a5211e49cb3-host\") pod \"node-ca-8jl2g\" (UID: \"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\") " pod="openshift-image-registry/node-ca-8jl2g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.281759 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/1c51936d-6aa7-4dcc-b09e-9a5211e49cb3-serviceca\") pod \"node-ca-8jl2g\" (UID: \"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\") " pod="openshift-image-registry/node-ca-8jl2g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.283298 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.299727 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.300076 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.300088 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.300105 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.300115 4932 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:25Z","lastTransitionTime":"2025-11-25T08:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.301946 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmp5l\" (UniqueName: \"kubernetes.io/projected/1c51936d-6aa7-4dcc-b09e-9a5211e49cb3-kube-api-access-zmp5l\") pod \"node-ca-8jl2g\" (UID: \"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\") " pod="openshift-image-registry/node-ca-8jl2g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.340404 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\
\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/v
ar/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.379311 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.381681 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.381851 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:49:29.381826761 +0000 UTC m=+29.507856324 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.381895 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.381949 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.381992 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.382071 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.382080 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.382093 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.382103 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.382114 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.382116 4932 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.382129 4932 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.382121 4932 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.382170 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:29.38215828 +0000 UTC m=+29.508187843 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.382178 4932 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.382201 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:29.382178361 +0000 UTC m=+29.508207924 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.382215 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:29.382208231 +0000 UTC m=+29.508237794 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.382228 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:29.382221392 +0000 UTC m=+29.508250955 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.394858 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-8jl2g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.402735 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.402771 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.402783 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.402802 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.402817 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:25Z","lastTransitionTime":"2025-11-25T08:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:25 crc kubenswrapper[4932]: W1125 08:49:25.406778 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c51936d_6aa7_4dcc_b09e_9a5211e49cb3.slice/crio-cfadf1f43e977f84070aeee4f5c3eefe00718389ea6815675a800e0801ebadab WatchSource:0}: Error finding container cfadf1f43e977f84070aeee4f5c3eefe00718389ea6815675a800e0801ebadab: Status 404 returned error can't find the container with id cfadf1f43e977f84070aeee4f5c3eefe00718389ea6815675a800e0801ebadab Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.418129 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92e
daf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.457629 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.505140 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.505181 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.505210 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.505226 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.505238 4932 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:25Z","lastTransitionTime":"2025-11-25T08:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.510509 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd
6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.537761 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.576170 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.605549 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.605617 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.605675 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.605750 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.605796 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:25 crc kubenswrapper[4932]: E1125 08:49:25.605931 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.607950 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.607983 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.607994 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.608009 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.608020 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:25Z","lastTransitionTime":"2025-11-25T08:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.618758 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.654320 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.694090 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.710533 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.710579 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.710590 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.710606 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.710617 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:25Z","lastTransitionTime":"2025-11-25T08:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.733182 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.779908 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.812369 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.812415 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.812427 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.812445 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.812457 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:25Z","lastTransitionTime":"2025-11-25T08:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.814859 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.814917 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.814933 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.816374 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.817027 4932 generic.go:334] "Generic (PLEG): container finished" podID="343dd8b2-7428-4b00-9c0a-00f728022d6d" containerID="16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1" exitCode=0 Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.817081 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" event={"ID":"343dd8b2-7428-4b00-9c0a-00f728022d6d","Type":"ContainerDied","Data":"16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.820014 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kvhb4" event={"ID":"199dbdf9-e2fc-459e-9e17-f5d520309f0a","Type":"ContainerStarted","Data":"2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.822394 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-8jl2g" event={"ID":"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3","Type":"ContainerStarted","Data":"562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.822416 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-8jl2g" event={"ID":"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3","Type":"ContainerStarted","Data":"cfadf1f43e977f84070aeee4f5c3eefe00718389ea6815675a800e0801ebadab"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.853924 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.915294 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.915523 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.915591 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.915660 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.915724 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:25Z","lastTransitionTime":"2025-11-25T08:49:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.917418 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.939626 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:25 crc kubenswrapper[4932]: I1125 08:49:25.980114 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting
\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:25Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.012362 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or 
is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.020558 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.020587 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.020596 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.020611 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.020621 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:26Z","lastTransitionTime":"2025-11-25T08:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.058656 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z 
is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.094435 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.122800 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.122834 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.122846 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.122861 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.122872 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:26Z","lastTransitionTime":"2025-11-25T08:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file 
in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.134016 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.174709 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.222954 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.224846 4932 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.224889 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.224902 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.224918 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.224929 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:26Z","lastTransitionTime":"2025-11-25T08:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.259501 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc 
kubenswrapper[4932]: I1125 08:49:26.292341 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.326883 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.326921 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.326930 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.326944 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.326955 4932 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:26Z","lastTransitionTime":"2025-11-25T08:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.339230 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube
rnetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.381948 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\
\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49
:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.415828 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.429260 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.429300 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.429309 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.429324 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.429336 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:26Z","lastTransitionTime":"2025-11-25T08:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.457242 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a298
16379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.496011 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.531545 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.531590 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.531604 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.531626 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.531640 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:26Z","lastTransitionTime":"2025-11-25T08:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.537705 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.574805 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.614466 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.634579 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.634614 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.634625 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.634640 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.634652 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:26Z","lastTransitionTime":"2025-11-25T08:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.737167 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.737240 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.737256 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.737299 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.737313 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:26Z","lastTransitionTime":"2025-11-25T08:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.828006 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" event={"ID":"343dd8b2-7428-4b00-9c0a-00f728022d6d","Type":"ContainerStarted","Data":"e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453"} Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.839732 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.839776 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.839785 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.839799 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.839809 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:26Z","lastTransitionTime":"2025-11-25T08:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.860783 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z 
is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.879102 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\
\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.891690 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.905813 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.917459 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.929883 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.941804 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.941838 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.941846 4932 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.941862 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.941872 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:26Z","lastTransitionTime":"2025-11-25T08:49:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.943091 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.955883 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:26 crc kubenswrapper[4932]: I1125 08:49:26.973582 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:26Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.012888 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.044270 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.044310 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.044322 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.044337 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.044348 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:27Z","lastTransitionTime":"2025-11-25T08:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.057603 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":
\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.100655 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.136383 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.146716 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.146748 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.146759 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.146782 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.146795 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:27Z","lastTransitionTime":"2025-11-25T08:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.177328 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a298
16379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.216088 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.248712 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.248748 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.248758 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.248773 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.248782 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:27Z","lastTransitionTime":"2025-11-25T08:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.256647 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.299285 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269
019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"
,\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.337153 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.351561 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.351600 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.351611 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.351630 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.351641 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:27Z","lastTransitionTime":"2025-11-25T08:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.372981 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a298
16379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.415658 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.453665 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.453698 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.453708 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.453723 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.453734 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:27Z","lastTransitionTime":"2025-11-25T08:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.471353 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.503669 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.533628 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.555832 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.555896 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.555905 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.555920 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.555929 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:27Z","lastTransitionTime":"2025-11-25T08:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.583153 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z 
is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.604846 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.604880 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.604880 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:27 crc kubenswrapper[4932]: E1125 08:49:27.604948 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:27 crc kubenswrapper[4932]: E1125 08:49:27.605070 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:27 crc kubenswrapper[4932]: E1125 08:49:27.605143 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.613797 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.653161 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.657658 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.657681 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.657689 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.657703 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.657712 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:27Z","lastTransitionTime":"2025-11-25T08:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.694415 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.736767 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.761082 4932 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.761145 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.761166 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.761226 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.761246 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:27Z","lastTransitionTime":"2025-11-25T08:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.778061 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.814509 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.834316 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd"} Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.836092 4932 generic.go:334] "Generic (PLEG): container finished" podID="343dd8b2-7428-4b00-9c0a-00f728022d6d" containerID="e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453" exitCode=0 Nov 25 08:49:27 crc 
kubenswrapper[4932]: I1125 08:49:27.836131 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" event={"ID":"343dd8b2-7428-4b00-9c0a-00f728022d6d","Type":"ContainerDied","Data":"e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453"} Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.864323 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.864372 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.864383 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.864400 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.864412 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:27Z","lastTransitionTime":"2025-11-25T08:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.866157 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z 
is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.895900 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.934309 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.966274 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.966306 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.966318 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.966335 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.966346 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:27Z","lastTransitionTime":"2025-11-25T08:49:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:27 crc kubenswrapper[4932]: I1125 08:49:27.974687 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:27Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.014844 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.056492 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"c
ontainerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.068301 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.068332 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.068341 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.068354 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.068363 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:28Z","lastTransitionTime":"2025-11-25T08:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.093666 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.134425 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.171508 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.171574 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.171594 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.171634 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.171664 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:28Z","lastTransitionTime":"2025-11-25T08:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.176878 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.231972 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c687744
1ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.257404 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.274051 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.274300 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.274420 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.274549 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.274658 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:28Z","lastTransitionTime":"2025-11-25T08:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.294661 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a298
16379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.336662 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.376479 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.376520 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.376530 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.376546 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.376557 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:28Z","lastTransitionTime":"2025-11-25T08:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.380261 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.420749 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.479052 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.479128 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.479164 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.479265 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.479293 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:28Z","lastTransitionTime":"2025-11-25T08:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.582390 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.582462 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.582481 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.582523 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.582540 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:28Z","lastTransitionTime":"2025-11-25T08:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.685312 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.685374 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.685392 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.685416 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.685434 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:28Z","lastTransitionTime":"2025-11-25T08:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.787721 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.787767 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.787777 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.787791 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.787801 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:28Z","lastTransitionTime":"2025-11-25T08:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.842134 4932 generic.go:334] "Generic (PLEG): container finished" podID="343dd8b2-7428-4b00-9c0a-00f728022d6d" containerID="7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b" exitCode=0 Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.842254 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" event={"ID":"343dd8b2-7428-4b00-9c0a-00f728022d6d","Type":"ContainerDied","Data":"7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b"} Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.865978 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z 
is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.890344 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.890396 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.890417 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.890439 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.890458 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:28Z","lastTransitionTime":"2025-11-25T08:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.897818 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.911944 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.930287 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.948507 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.963911 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\
\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.976175 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.986889 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.993096 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.993120 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.993128 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.993141 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:28 crc kubenswrapper[4932]: I1125 08:49:28.993151 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:28Z","lastTransitionTime":"2025-11-25T08:49:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.002732 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:28Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.024999 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c687744
1ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.039644 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.051763 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.067774 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.082301 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.096405 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.096439 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.096449 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.096465 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.096481 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:29Z","lastTransitionTime":"2025-11-25T08:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.097508 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.199433 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.199461 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.199468 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.199481 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.199489 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:29Z","lastTransitionTime":"2025-11-25T08:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.301950 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.302012 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.302042 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.302064 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.302082 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:29Z","lastTransitionTime":"2025-11-25T08:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.405900 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.405984 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.406004 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.406038 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.406062 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:29Z","lastTransitionTime":"2025-11-25T08:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.420840 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.420992 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421039 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:49:37.420998803 +0000 UTC m=+37.547028406 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.421099 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.421211 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.421313 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421319 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421390 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421418 4932 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421473 4932 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421540 4932 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421555 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421545 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:37.421513736 +0000 UTC m=+37.547543339 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421603 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421634 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:37.421617229 +0000 UTC m=+37.547646822 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421634 4932 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421664 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:37.42165137 +0000 UTC m=+37.547680963 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.421752 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:37.421711361 +0000 UTC m=+37.547740954 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.509756 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.509845 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.509868 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.509896 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.509915 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:29Z","lastTransitionTime":"2025-11-25T08:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.604908 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.605009 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.605037 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.605056 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.605304 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 08:49:29 crc kubenswrapper[4932]: E1125 08:49:29.605596 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.613509 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.613541 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.613552 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.613567 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.613577 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:29Z","lastTransitionTime":"2025-11-25T08:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.716019 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.716107 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.716149 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.716174 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.716216 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:29Z","lastTransitionTime":"2025-11-25T08:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.818984 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.819032 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.819042 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.819055 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.819065 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:29Z","lastTransitionTime":"2025-11-25T08:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.848703 4932 generic.go:334] "Generic (PLEG): container finished" podID="343dd8b2-7428-4b00-9c0a-00f728022d6d" containerID="c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db" exitCode=0
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.848820 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" event={"ID":"343dd8b2-7428-4b00-9c0a-00f728022d6d","Type":"ContainerDied","Data":"c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db"}
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.858371 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb"}
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.858762 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.859003 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.859014 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.875773 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.898032 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.898520 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.899741 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.919266 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.921483 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.921565 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.921580 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.921597 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.921631 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:29Z","lastTransitionTime":"2025-11-25T08:49:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.932577 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.959175 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.975537 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:29 crc kubenswrapper[4932]: I1125 08:49:29.988403 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:29Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.003684 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.016663 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.024445 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.024513 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.024528 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.024550 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.024888 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:30Z","lastTransitionTime":"2025-11-25T08:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.031256 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.043678 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.056948 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.067905 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.081343 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.108560 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4
a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.120613 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.127372 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.127426 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.127441 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.127466 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.127483 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:30Z","lastTransitionTime":"2025-11-25T08:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.134711 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.147567 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.168115 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047
050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.179588 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.194688 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.211874 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.225347 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.229846 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.229869 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.229879 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.229894 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.229904 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:30Z","lastTransitionTime":"2025-11-25T08:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.241277 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.257666 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.270542 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.282383 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.298315 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.317851 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4
a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.336955 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.337001 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.337009 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.337026 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.337036 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:30Z","lastTransitionTime":"2025-11-25T08:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.359472 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.439671 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.439708 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.439719 
4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.439734 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.439744 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:30Z","lastTransitionTime":"2025-11-25T08:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.541909 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.541940 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.541948 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.541963 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.541973 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:30Z","lastTransitionTime":"2025-11-25T08:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.642867 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.644732 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.644763 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.644773 
4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.644790 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.644800 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:30Z","lastTransitionTime":"2025-11-25T08:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.657407 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.669502 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.683648 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.693785 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.709642 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047
050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.721664 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.746699 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.747882 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.747914 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.747935 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.748159 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:30Z","lastTransitionTime":"2025-11-25T08:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.754839 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":
0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termi
nated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.772631 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 
08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod 
\"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.790586 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"
name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.850882 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.851770 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.851948 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.852086 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.852250 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:30Z","lastTransitionTime":"2025-11-25T08:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.867410 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" event={"ID":"343dd8b2-7428-4b00-9c0a-00f728022d6d","Type":"ContainerStarted","Data":"895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676"} Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.869709 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.882291 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.900052 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.915082 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.934538 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.955107 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.955147 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.955160 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.955178 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.955210 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:30Z","lastTransitionTime":"2025-11-25T08:49:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.959145 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1
377d80aeaefc3b117e654ccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.972942 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:30 crc kubenswrapper[4932]: I1125 08:49:30.988832 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:30Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.003754 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.016908 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.055672 
4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current 
time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.057702 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.057762 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.057780 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.057805 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.057825 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.105338 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.151056 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.160446 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.160490 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.160502 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.160517 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.160528 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.170425 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.170476 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.170493 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.170513 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.170529 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.175071 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: E1125 08:49:31.182834 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.186577 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.186621 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.186634 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.186653 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.186665 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: E1125 08:49:31.198072 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.201756 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.201807 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.201819 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.201834 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.201844 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: E1125 08:49:31.214790 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.215573 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.218629 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.218663 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.218671 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.218684 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.218693 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: E1125 08:49:31.234094 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.237387 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.237416 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.237425 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.237438 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.237448 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: E1125 08:49:31.247891 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: E1125 08:49:31.247998 4932 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.261515 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1
e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bd
f7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.262497 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.262517 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.262526 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.262538 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.262547 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.297240 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.339073 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.364734 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.364768 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.364778 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.364793 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.364805 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.375370 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.415239 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:31Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.467436 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.467497 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.467514 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.467539 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.467558 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.569523 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.569558 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.569567 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.569580 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.569590 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.605328 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.605411 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:31 crc kubenswrapper[4932]: E1125 08:49:31.605437 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:31 crc kubenswrapper[4932]: E1125 08:49:31.605518 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.605625 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:31 crc kubenswrapper[4932]: E1125 08:49:31.605832 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.672716 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.672790 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.672810 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.672840 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.672864 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.775098 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.775137 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.775149 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.775166 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.775178 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.878286 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.878346 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.878367 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.878393 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.878410 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.982070 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.982881 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.982978 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.983008 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:31 crc kubenswrapper[4932]: I1125 08:49:31.983024 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:31Z","lastTransitionTime":"2025-11-25T08:49:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.085444 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.085477 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.085487 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.085501 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.085512 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:32Z","lastTransitionTime":"2025-11-25T08:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.188337 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.188388 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.188402 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.188420 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.188433 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:32Z","lastTransitionTime":"2025-11-25T08:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.291132 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.291210 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.291220 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.291239 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.291253 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:32Z","lastTransitionTime":"2025-11-25T08:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.395407 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.395476 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.395489 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.395511 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.395548 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:32Z","lastTransitionTime":"2025-11-25T08:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.498623 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.498656 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.498666 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.498679 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.498687 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:32Z","lastTransitionTime":"2025-11-25T08:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.601289 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.601320 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.601329 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.601342 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.601351 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:32Z","lastTransitionTime":"2025-11-25T08:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.703743 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.703779 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.703787 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.703801 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.703820 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:32Z","lastTransitionTime":"2025-11-25T08:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.813056 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.813103 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.813111 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.813126 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.813135 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:32Z","lastTransitionTime":"2025-11-25T08:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.876411 4932 generic.go:334] "Generic (PLEG): container finished" podID="343dd8b2-7428-4b00-9c0a-00f728022d6d" containerID="895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676" exitCode=0 Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.876463 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" event={"ID":"343dd8b2-7428-4b00-9c0a-00f728022d6d","Type":"ContainerDied","Data":"895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676"} Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.898658 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:32Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.916164 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.916749 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.916841 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.916925 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.917010 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:32Z","lastTransitionTime":"2025-11-25T08:49:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.921895 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:32Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.941415 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:32Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.960366 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:32Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.976576 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:32Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:32 crc kubenswrapper[4932]: I1125 08:49:32.993978 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:32Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.004784 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:33Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.018684 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:33Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.019668 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.019702 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.019711 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.019738 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.019750 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:33Z","lastTransitionTime":"2025-11-25T08:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.039513 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1
377d80aeaefc3b117e654ccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:33Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.056668 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:33Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.071078 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:33Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.089086 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:33Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.102719 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:33Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.119427 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f
42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\
\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:33Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.121964 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.122072 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.122153 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:33 crc kubenswrapper[4932]: 
I1125 08:49:33.122254 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.122366 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:33Z","lastTransitionTime":"2025-11-25T08:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.130643 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:33Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.225425 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:33 crc 
kubenswrapper[4932]: I1125 08:49:33.225460 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.225486 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.225503 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.225512 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:33Z","lastTransitionTime":"2025-11-25T08:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.328150 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.328251 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.328270 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.328298 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.328318 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:33Z","lastTransitionTime":"2025-11-25T08:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.431700 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.431744 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.431756 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.431774 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.431786 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:33Z","lastTransitionTime":"2025-11-25T08:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.534619 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.534671 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.534686 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.534709 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.534724 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:33Z","lastTransitionTime":"2025-11-25T08:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.605393 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.605509 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.605509 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:33 crc kubenswrapper[4932]: E1125 08:49:33.605646 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:33 crc kubenswrapper[4932]: E1125 08:49:33.606157 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:33 crc kubenswrapper[4932]: E1125 08:49:33.606284 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.637355 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.637390 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.637399 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.637412 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.637424 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:33Z","lastTransitionTime":"2025-11-25T08:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.740586 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.740639 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.740651 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.740677 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.740694 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:33Z","lastTransitionTime":"2025-11-25T08:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.844068 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.844145 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.844171 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.844240 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.844287 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:33Z","lastTransitionTime":"2025-11-25T08:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.883803 4932 generic.go:334] "Generic (PLEG): container finished" podID="343dd8b2-7428-4b00-9c0a-00f728022d6d" containerID="c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44" exitCode=0 Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.883849 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" event={"ID":"343dd8b2-7428-4b00-9c0a-00f728022d6d","Type":"ContainerDied","Data":"c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44"} Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.957980 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.958062 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.958087 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.958118 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.958141 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:33Z","lastTransitionTime":"2025-11-25T08:49:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.960809 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:33Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:33 crc kubenswrapper[4932]: I1125 08:49:33.990118 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:33Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.000449 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:33Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.020495 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a168
8df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.036531 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.054162 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.061640 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.061676 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.061686 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.061699 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.061708 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:34Z","lastTransitionTime":"2025-11-25T08:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.065730 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.079776 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.091600 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.102481 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.113667 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.122705 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.134586 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.150992 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.164838 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.164877 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.164886 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.164899 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.164908 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:34Z","lastTransitionTime":"2025-11-25T08:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.167119 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.266879 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.266919 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.266930 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.266945 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.266958 4932 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:34Z","lastTransitionTime":"2025-11-25T08:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.281038 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.301470 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.319458 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.339587 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.358748 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.369913 4932 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.369993 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.370016 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.370047 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.370069 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:34Z","lastTransitionTime":"2025-11-25T08:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.385153 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.400936 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.404220 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x"] Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.404667 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.408094 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.408334 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.428022 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name
\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.461084 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\"
:[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\
\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.472554 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.472791 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.472900 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.473006 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.473109 4932 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:34Z","lastTransitionTime":"2025-11-25T08:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.478633 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\
":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.481519 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9d294666-e880-455e-a17f-1f878dddc477-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-fft4x\" (UID: \"9d294666-e880-455e-a17f-1f878dddc477\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.481640 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9d294666-e880-455e-a17f-1f878dddc477-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-fft4x\" (UID: \"9d294666-e880-455e-a17f-1f878dddc477\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.481739 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9d294666-e880-455e-a17f-1f878dddc477-env-overrides\") pod \"ovnkube-control-plane-749d76644c-fft4x\" (UID: \"9d294666-e880-455e-a17f-1f878dddc477\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.481825 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbnhz\" (UniqueName: \"kubernetes.io/projected/9d294666-e880-455e-a17f-1f878dddc477-kube-api-access-tbnhz\") pod \"ovnkube-control-plane-749d76644c-fft4x\" (UID: \"9d294666-e880-455e-a17f-1f878dddc477\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.497004 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-
resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.511490 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.527839 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.546808 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.560686 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.575978 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.576207 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.576429 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.576606 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.576791 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:34Z","lastTransitionTime":"2025-11-25T08:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.582508 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9d294666-e880-455e-a17f-1f878dddc477-env-overrides\") pod \"ovnkube-control-plane-749d76644c-fft4x\" (UID: \"9d294666-e880-455e-a17f-1f878dddc477\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.586481 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9d294666-e880-455e-a17f-1f878dddc477-env-overrides\") pod \"ovnkube-control-plane-749d76644c-fft4x\" (UID: \"9d294666-e880-455e-a17f-1f878dddc477\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.587170 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbnhz\" (UniqueName: \"kubernetes.io/projected/9d294666-e880-455e-a17f-1f878dddc477-kube-api-access-tbnhz\") pod \"ovnkube-control-plane-749d76644c-fft4x\" (UID: \"9d294666-e880-455e-a17f-1f878dddc477\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.587262 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9d294666-e880-455e-a17f-1f878dddc477-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-fft4x\" (UID: \"9d294666-e880-455e-a17f-1f878dddc477\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.587303 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9d294666-e880-455e-a17f-1f878dddc477-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-fft4x\" (UID: \"9d294666-e880-455e-a17f-1f878dddc477\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.587904 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/9d294666-e880-455e-a17f-1f878dddc477-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-fft4x\" (UID: \"9d294666-e880-455e-a17f-1f878dddc477\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.594320 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9d294666-e880-455e-a17f-1f878dddc477-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-fft4x\" (UID: \"9d294666-e880-455e-a17f-1f878dddc477\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.595072 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1
377d80aeaefc3b117e654ccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.608679 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.610846 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbnhz\" (UniqueName: \"kubernetes.io/projected/9d294666-e880-455e-a17f-1f878dddc477-kube-api-access-tbnhz\") pod \"ovnkube-control-plane-749d76644c-fft4x\" (UID: \"9d294666-e880-455e-a17f-1f878dddc477\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.627767 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.640746 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.652724 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.667377 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a168
8df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.677242 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.678724 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.678772 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.678781 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.678799 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.678811 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:34Z","lastTransitionTime":"2025-11-25T08:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.689424 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.700627 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.712636 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.725945 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.725985 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" Nov 25 08:49:34 crc kubenswrapper[4932]: W1125 08:49:34.742433 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d294666_e880_455e_a17f_1f878dddc477.slice/crio-6ea8f95426fcea36d24d39f8ad90b074870b2e558a5acf2c15371e2372e53cb9 WatchSource:0}: Error finding container 6ea8f95426fcea36d24d39f8ad90b074870b2e558a5acf2c15371e2372e53cb9: Status 404 returned error can't find the container with id 6ea8f95426fcea36d24d39f8ad90b074870b2e558a5acf2c15371e2372e53cb9 Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.742667 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.757613 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.770432 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.782368 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.782415 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.782425 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.782442 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.782454 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:34Z","lastTransitionTime":"2025-11-25T08:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.785789 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":
\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.806117 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.821546 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74
020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.884676 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.884736 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.884751 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.884767 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.884779 4932 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:34Z","lastTransitionTime":"2025-11-25T08:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.888874 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" event={"ID":"343dd8b2-7428-4b00-9c0a-00f728022d6d","Type":"ContainerStarted","Data":"ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2"} Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.889743 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" event={"ID":"9d294666-e880-455e-a17f-1f878dddc477","Type":"ContainerStarted","Data":"6ea8f95426fcea36d24d39f8ad90b074870b2e558a5acf2c15371e2372e53cb9"} Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.902778 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.914657 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.928612 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.942712 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.960895 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.973294 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.987718 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.987761 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.987771 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.987787 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.987798 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:34Z","lastTransitionTime":"2025-11-25T08:49:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:34 crc kubenswrapper[4932]: I1125 08:49:34.989418 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:34Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.006763 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.025726 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.038464 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.050948 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.063393 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.084243 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.090076 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.090142 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.090152 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.090196 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.090207 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:35Z","lastTransitionTime":"2025-11-25T08:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.096864 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.114160 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.127543 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.192000 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.192036 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.192046 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.192061 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.192071 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:35Z","lastTransitionTime":"2025-11-25T08:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.294511 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.294825 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.294842 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.294869 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.294887 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:35Z","lastTransitionTime":"2025-11-25T08:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.397363 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.397410 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.397427 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.397449 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.397465 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:35Z","lastTransitionTime":"2025-11-25T08:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.500387 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.500442 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.500460 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.500485 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.500502 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:35Z","lastTransitionTime":"2025-11-25T08:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.603490 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.603530 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.603541 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.603558 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.603573 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:35Z","lastTransitionTime":"2025-11-25T08:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.604920 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.605007 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:35 crc kubenswrapper[4932]: E1125 08:49:35.605605 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:35 crc kubenswrapper[4932]: E1125 08:49:35.605758 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.605007 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:35 crc kubenswrapper[4932]: E1125 08:49:35.605857 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.706882 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.706918 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.706932 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.706952 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.706968 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:35Z","lastTransitionTime":"2025-11-25T08:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.809169 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.809227 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.809236 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.809249 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.809260 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:35Z","lastTransitionTime":"2025-11-25T08:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.895994 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" event={"ID":"9d294666-e880-455e-a17f-1f878dddc477","Type":"ContainerStarted","Data":"44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad"} Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.896384 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" event={"ID":"9d294666-e880-455e-a17f-1f878dddc477","Type":"ContainerStarted","Data":"f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8"} Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.898673 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/0.log" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.901924 4932 generic.go:334] "Generic (PLEG): container finished" podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb" exitCode=1 Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.901969 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb"} Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.902829 4932 scope.go:117] "RemoveContainer" containerID="080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.911632 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.911661 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.911672 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.911689 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.911633 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.911701 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:35Z","lastTransitionTime":"2025-11-25T08:49:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.931413 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.948452 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.962241 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.972656 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.986755 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:35 crc kubenswrapper[4932]: I1125 08:49:35.998005 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:35Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.009859 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.014101 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.014152 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.014170 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.014223 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.014254 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:36Z","lastTransitionTime":"2025-11-25T08:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.021274 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.046316 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c687744
1ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.070350 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.089353 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.105113 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.117675 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.117727 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.117740 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.117759 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.117770 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:36Z","lastTransitionTime":"2025-11-25T08:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.126442 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.153068 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"
mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.169702 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 
08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.187837 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.202867 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.220067 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.220125 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.220140 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.220160 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.220176 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:36Z","lastTransitionTime":"2025-11-25T08:49:36Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.222048 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.236043 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.249149 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.262490 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.273117 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.283772 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.292413 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.307104 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\"
:\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.312812 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-fvbqs"] Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.313289 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:36 crc kubenswrapper[4932]: E1125 08:49:36.313342 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.322568 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.322602 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.322614 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.322630 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.322642 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:36Z","lastTransitionTime":"2025-11-25T08:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.327463 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.338680 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.378076 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.389999 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.407767 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.407812 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5nh2\" (UniqueName: \"kubernetes.io/projected/58f40128-d3fc-4588-ad8f-8cf129079911-kube-api-access-c5nh2\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.413545 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"message\\\":\\\"9:35.185646 6163 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 08:49:35.185676 6163 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 08:49:35.186152 6163 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 08:49:35.186172 6163 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 08:49:35.186180 6163 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 08:49:35.186673 6163 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 08:49:35.186718 6163 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 08:49:35.186736 6163 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 08:49:35.186761 6163 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 08:49:35.186771 6163 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 08:49:35.186801 6163 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 08:49:35.186806 6163 factory.go:656] Stopping watch factory\\\\nI1125 08:49:35.186821 6163 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 08:49:35.186831 6163 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 08:49:35.186831 6163 ovnkube.go:599] Stopped ovnkube\\\\nI1125 08:49:35.186839 6163 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 
08\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.425279 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.425326 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.425337 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.425351 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.425361 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:36Z","lastTransitionTime":"2025-11-25T08:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.427030 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.450402 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3a
cbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"message\\\":\\\"9:35.185646 6163 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 08:49:35.185676 6163 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 08:49:35.186152 6163 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 08:49:35.186172 6163 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 08:49:35.186180 6163 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 08:49:35.186673 6163 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 08:49:35.186718 6163 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 08:49:35.186736 6163 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 08:49:35.186761 6163 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 08:49:35.186771 6163 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 08:49:35.186801 6163 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 08:49:35.186806 6163 factory.go:656] Stopping watch factory\\\\nI1125 08:49:35.186821 6163 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 08:49:35.186831 6163 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 08:49:35.186831 6163 ovnkube.go:599] Stopped ovnkube\\\\nI1125 08:49:35.186839 6163 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 
08\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.466244 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.482728 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" 
for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.496267 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.510351 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.510408 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5nh2\" (UniqueName: \"kubernetes.io/projected/58f40128-d3fc-4588-ad8f-8cf129079911-kube-api-access-c5nh2\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:36 crc kubenswrapper[4932]: E1125 08:49:36.510585 4932 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:49:36 crc kubenswrapper[4932]: E1125 08:49:36.510696 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs podName:58f40128-d3fc-4588-ad8f-8cf129079911 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:37.010669758 +0000 UTC m=+37.136699491 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs") pod "network-metrics-daemon-fvbqs" (UID: "58f40128-d3fc-4588-ad8f-8cf129079911") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.511135 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.526300 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.527772 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.527810 4932 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.527822 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.527839 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.527851 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:36Z","lastTransitionTime":"2025-11-25T08:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.532147 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5nh2\" (UniqueName: \"kubernetes.io/projected/58f40128-d3fc-4588-ad8f-8cf129079911-kube-api-access-c5nh2\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.540928 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"1
92.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.553238 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.567165 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.588603 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.603288 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.624911 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.630458 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.630488 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.630499 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.630514 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.630526 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:36Z","lastTransitionTime":"2025-11-25T08:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.641913 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.657290 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.669944 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.683762 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\"
:\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.702461 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-
25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db
\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:36Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.733435 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.733495 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.733509 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.733527 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.733538 4932 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:36Z","lastTransitionTime":"2025-11-25T08:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.836101 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.836134 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.836142 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.836155 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.836164 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:36Z","lastTransitionTime":"2025-11-25T08:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.939380 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.939436 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.939453 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.939477 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:36 crc kubenswrapper[4932]: I1125 08:49:36.939497 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:36Z","lastTransitionTime":"2025-11-25T08:49:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.016998 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.017170 4932 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.017313 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs podName:58f40128-d3fc-4588-ad8f-8cf129079911 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:38.017276977 +0000 UTC m=+38.143306590 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs") pod "network-metrics-daemon-fvbqs" (UID: "58f40128-d3fc-4588-ad8f-8cf129079911") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.042350 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.042413 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.042429 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.042452 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.042467 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:37Z","lastTransitionTime":"2025-11-25T08:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.145795 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.145891 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.145912 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.145947 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.145970 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:37Z","lastTransitionTime":"2025-11-25T08:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.249479 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.249517 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.249533 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.249555 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.249572 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:37Z","lastTransitionTime":"2025-11-25T08:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.353471 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.353545 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.353566 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.353592 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.353613 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:37Z","lastTransitionTime":"2025-11-25T08:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.422178 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.422467 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:49:53.422415045 +0000 UTC m=+53.548444658 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.422676 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.422807 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.422873 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.422917 4932 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.422949 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.423027 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-25 08:49:53.42299872 +0000 UTC m=+53.549028453 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.423088 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.423136 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.423157 4932 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.423158 4932 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.423263 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:53.423231476 +0000 UTC m=+53.549261069 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.423298 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:53.423279857 +0000 UTC m=+53.549309450 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.423406 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.423443 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.423469 4932 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.423542 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:53.423514494 +0000 UTC m=+53.549544237 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.456503 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.456558 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.456574 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.456595 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.456609 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:37Z","lastTransitionTime":"2025-11-25T08:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.559590 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.559649 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.559665 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.559687 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.559704 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:37Z","lastTransitionTime":"2025-11-25T08:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.605938 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.605988 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.606010 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.605950 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.606129 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.606396 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.606518 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:37 crc kubenswrapper[4932]: E1125 08:49:37.606631 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.662672 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.662731 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.662743 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.662763 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.662776 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:37Z","lastTransitionTime":"2025-11-25T08:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.767241 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.767327 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.767809 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.767896 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.768405 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:37Z","lastTransitionTime":"2025-11-25T08:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.876954 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.877101 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.877353 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.877676 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.878339 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:37Z","lastTransitionTime":"2025-11-25T08:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.916955 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/0.log" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.920860 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc"} Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.980610 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.980649 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.980658 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.980671 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:37 crc kubenswrapper[4932]: I1125 08:49:37.980682 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:37Z","lastTransitionTime":"2025-11-25T08:49:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.076770 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:38 crc kubenswrapper[4932]: E1125 08:49:38.077418 4932 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:49:38 crc kubenswrapper[4932]: E1125 08:49:38.077495 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs podName:58f40128-d3fc-4588-ad8f-8cf129079911 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:40.0774772 +0000 UTC m=+40.203506763 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs") pod "network-metrics-daemon-fvbqs" (UID: "58f40128-d3fc-4588-ad8f-8cf129079911") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.083245 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.084615 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.084633 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.084651 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.084664 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:38Z","lastTransitionTime":"2025-11-25T08:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.188762 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.188846 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.188869 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.188905 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.188930 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:38Z","lastTransitionTime":"2025-11-25T08:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.291543 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.291596 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.291614 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.291638 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.291653 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:38Z","lastTransitionTime":"2025-11-25T08:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.395120 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.395414 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.395584 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.395947 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.396341 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:38Z","lastTransitionTime":"2025-11-25T08:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.499499 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.499578 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.499595 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.499617 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.499635 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:38Z","lastTransitionTime":"2025-11-25T08:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.602399 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.602705 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.602788 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.602891 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.602951 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:38Z","lastTransitionTime":"2025-11-25T08:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.704841 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.704879 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.704889 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.704903 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.704912 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:38Z","lastTransitionTime":"2025-11-25T08:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.806569 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.806762 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.806823 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.806880 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.806932 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:38Z","lastTransitionTime":"2025-11-25T08:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.908824 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.908869 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.908883 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.908902 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.908916 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:38Z","lastTransitionTime":"2025-11-25T08:49:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.922976 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.950705 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\
\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c41
14325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"message\\\":\\\"9:35.185646 6163 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 08:49:35.185676 6163 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 08:49:35.186152 6163 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 08:49:35.186172 6163 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 08:49:35.186180 6163 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 08:49:35.186673 6163 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 08:49:35.186718 6163 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 08:49:35.186736 6163 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 08:49:35.186761 6163 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 08:49:35.186771 6163 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 08:49:35.186801 6163 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 08:49:35.186806 6163 factory.go:656] Stopping watch factory\\\\nI1125 08:49:35.186821 6163 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 08:49:35.186831 6163 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 08:49:35.186831 6163 ovnkube.go:599] Stopped ovnkube\\\\nI1125 08:49:35.186839 6163 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 
08\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:38Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.968002 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:38Z is after 2025-08-24T17:21:41Z" Nov 25 
08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.980749 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:38Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:38 crc kubenswrapper[4932]: I1125 08:49:38.993924 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:38Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.007083 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.011176 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.011302 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.011327 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.011357 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.011380 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:39Z","lastTransitionTime":"2025-11-25T08:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.027238 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.046001 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.060530 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.082177 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.098248 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.111001 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.113452 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.113489 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.113497 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.113509 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.113519 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:39Z","lastTransitionTime":"2025-11-25T08:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.125268 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.144793 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c687744
1ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.158135 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.169463 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.182628 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.198726 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.215486 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.215520 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.215529 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.215544 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.215554 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:39Z","lastTransitionTime":"2025-11-25T08:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.317608 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.317672 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.317686 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.317701 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.317713 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:39Z","lastTransitionTime":"2025-11-25T08:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.421243 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.421308 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.421335 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.421366 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.421390 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:39Z","lastTransitionTime":"2025-11-25T08:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.523617 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.523667 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.523679 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.523695 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.523706 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:39Z","lastTransitionTime":"2025-11-25T08:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.605710 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.605804 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs"
Nov 25 08:49:39 crc kubenswrapper[4932]: E1125 08:49:39.605825 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.605710 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.605713 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:49:39 crc kubenswrapper[4932]: E1125 08:49:39.605928 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911"
Nov 25 08:49:39 crc kubenswrapper[4932]: E1125 08:49:39.606047 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 08:49:39 crc kubenswrapper[4932]: E1125 08:49:39.606125 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.626323 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.626516 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.626573 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.626630 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.626683 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:39Z","lastTransitionTime":"2025-11-25T08:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.729182 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.729220 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.729229 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.729242 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.729251 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:39Z","lastTransitionTime":"2025-11-25T08:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.832678 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.832905 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.832979 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.832998 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.833013 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:39Z","lastTransitionTime":"2025-11-25T08:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.928830 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/1.log"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.930137 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/0.log"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.934355 4932 generic.go:334] "Generic (PLEG): container finished" podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc" exitCode=1
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.934424 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc"}
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.934509 4932 scope.go:117] "RemoveContainer" containerID="080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.934728 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.934783 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.934805 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.934833 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.934856 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:39Z","lastTransitionTime":"2025-11-25T08:49:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.935419 4932 scope.go:117] "RemoveContainer" containerID="b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc"
Nov 25 08:49:39 crc kubenswrapper[4932]: E1125 08:49:39.935669 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.968705 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"message\\\":\\\"9:35.185646 6163 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 08:49:35.185676 6163 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 08:49:35.186152 6163 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 08:49:35.186172 6163 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 08:49:35.186180 6163 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 08:49:35.186673 6163 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 08:49:35.186718 6163 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 08:49:35.186736 6163 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 08:49:35.186761 6163 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 08:49:35.186771 6163 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 08:49:35.186801 6163 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 08:49:35.186806 6163 factory.go:656] Stopping watch factory\\\\nI1125 08:49:35.186821 6163 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 08:49:35.186831 6163 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 08:49:35.186831 6163 ovnkube.go:599] Stopped ovnkube\\\\nI1125 08:49:35.186839 6163 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 08\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:39Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI1125 08:49:39.103135 6402 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI1125 08:49:39.103160 6402 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1125 08:49:39.103261 6402 factory.go:1336] Added *v1.Node event handler 7\\\\nI1125 08:49:39.103323 6402 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1125 08:49:39.103623 6402 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 08:49:39.103650 6402 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 08:49:39.103662 6402 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1125 08:49:39.103687 6402 factory.go:656] Stopping watch factory\\\\nI1125 08:49:39.103696 6402 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 08:49:39.103719 6402 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 08:49:39.103791 6402 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1125 08:49:39.103843 6402 ovnkube.go:599] Stopped ovnkube\\\\nI1125 08:49:39.103869 6402 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 08:49:39.103969 6402 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.983614 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:39 crc kubenswrapper[4932]: I1125 08:49:39.996371 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:39Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.011545 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.026134 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.037702 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.037796 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.037821 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.037854 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.037886 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:40Z","lastTransitionTime":"2025-11-25T08:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.043800 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.058929 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.075821 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.091009 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.102301 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs"
Nov 25 08:49:40 crc kubenswrapper[4932]: E1125 08:49:40.102672 4932 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 08:49:40 crc kubenswrapper[4932]: E1125 08:49:40.102756 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs podName:58f40128-d3fc-4588-ad8f-8cf129079911 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:44.102734285 +0000 UTC m=+44.228764038 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs") pod "network-metrics-daemon-fvbqs" (UID: "58f40128-d3fc-4588-ad8f-8cf129079911") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.104646 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.119647 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.135806 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.139881 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.140505 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.140525 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.140543 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.140557 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:40Z","lastTransitionTime":"2025-11-25T08:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.148550 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.163298 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.175465 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.187729 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.199843 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.243954 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.244008 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.244022 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.244042 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.244056 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:40Z","lastTransitionTime":"2025-11-25T08:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.346971 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.347032 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.347061 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.347106 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.347127 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:40Z","lastTransitionTime":"2025-11-25T08:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.458226 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.458274 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.458285 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.458301 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.458312 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:40Z","lastTransitionTime":"2025-11-25T08:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.560436 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.560478 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.560490 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.560505 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.560517 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:40Z","lastTransitionTime":"2025-11-25T08:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.619039 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.638184 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3a
cbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://080d306d329949d7c9354cf97c15060e4787e2c1377d80aeaefc3b117e654ccb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"message\\\":\\\"9:35.185646 6163 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 08:49:35.185676 6163 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 08:49:35.186152 6163 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 08:49:35.186172 6163 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 08:49:35.186180 6163 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 08:49:35.186673 6163 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 08:49:35.186718 6163 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 08:49:35.186736 6163 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 08:49:35.186761 6163 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 08:49:35.186771 6163 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 08:49:35.186801 6163 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 08:49:35.186806 6163 factory.go:656] Stopping watch factory\\\\nI1125 08:49:35.186821 6163 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 08:49:35.186831 6163 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 08:49:35.186831 6163 ovnkube.go:599] Stopped ovnkube\\\\nI1125 08:49:35.186839 6163 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 
08\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:39Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI1125 08:49:39.103135 6402 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI1125 08:49:39.103160 6402 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1125 08:49:39.103261 6402 factory.go:1336] Added *v1.Node event handler 7\\\\nI1125 08:49:39.103323 6402 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1125 08:49:39.103623 6402 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 08:49:39.103650 6402 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 08:49:39.103662 6402 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1125 08:49:39.103687 6402 factory.go:656] Stopping watch factory\\\\nI1125 08:49:39.103696 6402 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 08:49:39.103719 6402 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 08:49:39.103791 6402 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1125 08:49:39.103843 6402 ovnkube.go:599] Stopped ovnkube\\\\nI1125 08:49:39.103869 6402 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 08:49:39.103969 6402 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.650538 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.661857 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8
bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.662900 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.662941 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.662952 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.662968 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.662979 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:40Z","lastTransitionTime":"2025-11-25T08:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.675812 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.684597 4932 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.693298 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.705115 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.715988 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.730156 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.750681 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.763667 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.765142 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.765163 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.765171 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.765205 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.765217 4932 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:40Z","lastTransitionTime":"2025-11-25T08:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.775793 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.784542 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.795722 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.816376 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4
a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.830634 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.868007 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.868074 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.868085 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.868106 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.868121 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:40Z","lastTransitionTime":"2025-11-25T08:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.939010 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/1.log" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.943035 4932 scope.go:117] "RemoveContainer" containerID="b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc" Nov 25 08:49:40 crc kubenswrapper[4932]: E1125 08:49:40.943297 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.964742 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39e280313b6b76bf4f993f95e686a63327ad2ee
a3cd90e74ae4361acb5d07dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:39Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI1125 08:49:39.103135 6402 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI1125 08:49:39.103160 6402 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1125 08:49:39.103261 6402 factory.go:1336] Added *v1.Node event handler 7\\\\nI1125 08:49:39.103323 6402 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1125 08:49:39.103623 6402 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 08:49:39.103650 6402 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 08:49:39.103662 6402 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1125 08:49:39.103687 6402 factory.go:656] Stopping watch factory\\\\nI1125 08:49:39.103696 6402 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 08:49:39.103719 6402 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 08:49:39.103791 6402 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1125 08:49:39.103843 6402 ovnkube.go:599] Stopped ovnkube\\\\nI1125 08:49:39.103869 6402 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 08:49:39.103969 6402 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.971315 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.971364 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.971376 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.971393 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.971404 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:40Z","lastTransitionTime":"2025-11-25T08:49:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.980949 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:40 crc kubenswrapper[4932]: I1125 08:49:40.996248 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:40Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.008924 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.021678 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.039639 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.051747 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.062162 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.073723 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.073767 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.073776 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.073788 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.073796 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.074582 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.087766 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.101421 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.114284 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.125699 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.139045 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.147587 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.157930 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.175733 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.175784 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.175796 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.175811 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.175880 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49
117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"s
etup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.175823 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.278520 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.278564 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.278575 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.278594 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.278611 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.352399 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.352449 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.352460 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.352477 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.352488 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: E1125 08:49:41.365570 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.369342 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.369372 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.369382 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.369397 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.369409 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: E1125 08:49:41.387177 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.391973 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.391998 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.392005 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.392016 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.392024 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: E1125 08:49:41.409567 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.413911 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.413970 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.413989 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.414013 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.414030 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: E1125 08:49:41.433064 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.437264 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.437311 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.437322 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.437338 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.437349 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: E1125 08:49:41.454632 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:41Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:41 crc kubenswrapper[4932]: E1125 08:49:41.454881 4932 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.456668 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.456729 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.456749 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.456773 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.456790 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.559795 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.559863 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.559886 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.559916 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.559938 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.605633 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.605654 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.605705 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.605705 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:41 crc kubenswrapper[4932]: E1125 08:49:41.605829 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:49:41 crc kubenswrapper[4932]: E1125 08:49:41.605891 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:41 crc kubenswrapper[4932]: E1125 08:49:41.605963 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:41 crc kubenswrapper[4932]: E1125 08:49:41.606092 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.662816 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.662866 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.662881 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.662903 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.662917 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.765721 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.765773 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.765787 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.765806 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.765822 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.869289 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.869354 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.869372 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.869395 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.869414 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.973643 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.973712 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.973724 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.973744 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:41 crc kubenswrapper[4932]: I1125 08:49:41.973756 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:41Z","lastTransitionTime":"2025-11-25T08:49:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.076578 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.076614 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.076625 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.076639 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.076650 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:42Z","lastTransitionTime":"2025-11-25T08:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.180640 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.180707 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.180718 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.180733 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.180744 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:42Z","lastTransitionTime":"2025-11-25T08:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.283569 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.283604 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.283612 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.283624 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.283633 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:42Z","lastTransitionTime":"2025-11-25T08:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.385908 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.385956 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.385974 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.385995 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.386007 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:42Z","lastTransitionTime":"2025-11-25T08:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.488732 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.488773 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.488783 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.488801 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.488812 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:42Z","lastTransitionTime":"2025-11-25T08:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.591324 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.591356 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.591365 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.591380 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.591390 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:42Z","lastTransitionTime":"2025-11-25T08:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.695707 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.695774 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.695793 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.695823 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.695840 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:42Z","lastTransitionTime":"2025-11-25T08:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.799074 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.799155 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.799177 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.799248 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.799274 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:42Z","lastTransitionTime":"2025-11-25T08:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.902355 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.902431 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.902455 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.902486 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:42 crc kubenswrapper[4932]: I1125 08:49:42.902508 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:42Z","lastTransitionTime":"2025-11-25T08:49:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.004790 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.004833 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.004851 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.004868 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.004878 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:43Z","lastTransitionTime":"2025-11-25T08:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.107380 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.107414 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.107424 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.107437 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.107447 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:43Z","lastTransitionTime":"2025-11-25T08:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.210260 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.210332 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.210354 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.210382 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.210406 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:43Z","lastTransitionTime":"2025-11-25T08:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.312514 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.312547 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.312558 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.312573 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.312582 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:43Z","lastTransitionTime":"2025-11-25T08:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.415891 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.415973 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.415999 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.416028 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.416054 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:43Z","lastTransitionTime":"2025-11-25T08:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.519668 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.520058 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.520518 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.520860 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.521216 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:43Z","lastTransitionTime":"2025-11-25T08:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.605876 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.605900 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.605945 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.605963 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:43 crc kubenswrapper[4932]: E1125 08:49:43.606815 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:43 crc kubenswrapper[4932]: E1125 08:49:43.606954 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:43 crc kubenswrapper[4932]: E1125 08:49:43.607072 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:49:43 crc kubenswrapper[4932]: E1125 08:49:43.607169 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.624866 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.624928 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.624945 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.624970 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.624988 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:43Z","lastTransitionTime":"2025-11-25T08:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.727467 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.727511 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.727530 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.727549 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.727561 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:43Z","lastTransitionTime":"2025-11-25T08:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.830215 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.830270 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.830292 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.830316 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.830337 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:43Z","lastTransitionTime":"2025-11-25T08:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.933896 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.933933 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.933948 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.933970 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:43 crc kubenswrapper[4932]: I1125 08:49:43.933986 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:43Z","lastTransitionTime":"2025-11-25T08:49:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.037336 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.037405 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.037429 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.037459 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.037482 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:44Z","lastTransitionTime":"2025-11-25T08:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.141049 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.141125 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.141144 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.141169 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.141216 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:44Z","lastTransitionTime":"2025-11-25T08:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.143850 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:44 crc kubenswrapper[4932]: E1125 08:49:44.144101 4932 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:49:44 crc kubenswrapper[4932]: E1125 08:49:44.144182 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs podName:58f40128-d3fc-4588-ad8f-8cf129079911 nodeName:}" failed. No retries permitted until 2025-11-25 08:49:52.144160585 +0000 UTC m=+52.270190178 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs") pod "network-metrics-daemon-fvbqs" (UID: "58f40128-d3fc-4588-ad8f-8cf129079911") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.244042 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.244084 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.244096 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.244113 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.244124 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:44Z","lastTransitionTime":"2025-11-25T08:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.348040 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.348154 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.348170 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.348203 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.348248 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:44Z","lastTransitionTime":"2025-11-25T08:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.451266 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.451342 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.451363 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.451390 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.451409 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:44Z","lastTransitionTime":"2025-11-25T08:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.555396 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.555453 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.555470 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.555496 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.555518 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:44Z","lastTransitionTime":"2025-11-25T08:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.658697 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.658753 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.658764 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.658784 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.658795 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:44Z","lastTransitionTime":"2025-11-25T08:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.762770 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.762843 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.762862 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.762888 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.762906 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:44Z","lastTransitionTime":"2025-11-25T08:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.866653 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.866706 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.866718 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.866738 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.866751 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:44Z","lastTransitionTime":"2025-11-25T08:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.969468 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.969521 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.969536 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.969555 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:44 crc kubenswrapper[4932]: I1125 08:49:44.969570 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:44Z","lastTransitionTime":"2025-11-25T08:49:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.072172 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.072231 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.072240 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.072253 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.072263 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:45Z","lastTransitionTime":"2025-11-25T08:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.174877 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.174954 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.174973 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.174996 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.175014 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:45Z","lastTransitionTime":"2025-11-25T08:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.277409 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.277488 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.277510 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.277539 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.277561 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:45Z","lastTransitionTime":"2025-11-25T08:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.380696 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.380763 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.380781 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.380806 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.380824 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:45Z","lastTransitionTime":"2025-11-25T08:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.484353 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.484433 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.484484 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.484543 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.484563 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:45Z","lastTransitionTime":"2025-11-25T08:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.587268 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.587346 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.587373 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.587403 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.587425 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:45Z","lastTransitionTime":"2025-11-25T08:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.605933 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.606017 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.605938 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:45 crc kubenswrapper[4932]: E1125 08:49:45.606075 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:49:45 crc kubenswrapper[4932]: E1125 08:49:45.606158 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.605952 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:45 crc kubenswrapper[4932]: E1125 08:49:45.606262 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:45 crc kubenswrapper[4932]: E1125 08:49:45.606449 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.689920 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.690022 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.690044 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.690074 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.690096 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:45Z","lastTransitionTime":"2025-11-25T08:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.792733 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.792826 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.792840 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.792860 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:45 crc kubenswrapper[4932]: I1125 08:49:45.792871 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:45Z","lastTransitionTime":"2025-11-25T08:49:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.448137 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.448232 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.448249 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.448273 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.448291 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:47Z","lastTransitionTime":"2025-11-25T08:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.552882 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.552999 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.553016 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.553040 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.553056 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:47Z","lastTransitionTime":"2025-11-25T08:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.604958 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.604989 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.605076 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.605339 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:49:47 crc kubenswrapper[4932]: E1125 08:49:47.605322 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 08:49:47 crc kubenswrapper[4932]: E1125 08:49:47.605634 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 08:49:47 crc kubenswrapper[4932]: E1125 08:49:47.605692 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 08:49:47 crc kubenswrapper[4932]: E1125 08:49:47.605791 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.655702 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.656107 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.656391 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.656569 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:47 crc kubenswrapper[4932]: I1125 08:49:47.656711 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:47Z","lastTransitionTime":"2025-11-25T08:49:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.605837 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.605890 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 08:49:49 crc kubenswrapper[4932]: E1125 08:49:49.606026 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.606075 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:49:49 crc kubenswrapper[4932]: E1125 08:49:49.606331 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.606348 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs"
Nov 25 08:49:49 crc kubenswrapper[4932]: E1125 08:49:49.606432 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 08:49:49 crc kubenswrapper[4932]: E1125 08:49:49.606551 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911"
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.622570 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.622635 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.622652 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.622679 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.622698 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:49Z","lastTransitionTime":"2025-11-25T08:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.725589 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.725660 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.725683 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.725707 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:49 crc kubenswrapper[4932]: I1125 08:49:49.725724 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:49Z","lastTransitionTime":"2025-11-25T08:49:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.446786 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.446852 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.446871 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.446897 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.446918 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:50Z","lastTransitionTime":"2025-11-25T08:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.549585 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.549635 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.549652 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.549676 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.549692 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:50Z","lastTransitionTime":"2025-11-25T08:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.626969 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:39Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI1125 08:49:39.103135 6402 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI1125 08:49:39.103160 6402 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1125 08:49:39.103261 6402 factory.go:1336] Added *v1.Node event handler 7\\\\nI1125 08:49:39.103323 6402 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1125 08:49:39.103623 6402 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 08:49:39.103650 6402 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 08:49:39.103662 6402 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1125 08:49:39.103687 6402 factory.go:656] Stopping watch factory\\\\nI1125 08:49:39.103696 6402 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 08:49:39.103719 6402 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 08:49:39.103791 6402 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1125 08:49:39.103843 6402 ovnkube.go:599] Stopped ovnkube\\\\nI1125 08:49:39.103869 6402 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 08:49:39.103969 6402 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.640900 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.651986 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.652178 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.652240 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.652252 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.652270 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.652284 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:50Z","lastTransitionTime":"2025-11-25T08:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.663471 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.678217 4932 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.690548 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.703800 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.715815 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.732121 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.745154 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.755680 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.755733 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.755746 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.755764 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.755774 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:50Z","lastTransitionTime":"2025-11-25T08:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.756669 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.789203 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.820682 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.835807 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.852249 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.863278 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.863329 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.863344 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.863366 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.863378 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:50Z","lastTransitionTime":"2025-11-25T08:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.866692 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.879816 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:50Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.965705 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.965749 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.965760 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.965776 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:50 crc kubenswrapper[4932]: I1125 08:49:50.965801 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:50Z","lastTransitionTime":"2025-11-25T08:49:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.068640 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.068730 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.068747 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.068771 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.068789 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.171717 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.171785 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.171803 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.171828 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.171847 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.275865 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.275934 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.275951 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.275977 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.275995 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.378920 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.379009 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.379022 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.379044 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.379057 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.481942 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.482012 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.482029 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.482052 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.482069 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.584969 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.585030 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.585047 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.585071 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.585088 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.607701 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:51 crc kubenswrapper[4932]: E1125 08:49:51.607887 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.607933 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.608034 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.608103 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:51 crc kubenswrapper[4932]: E1125 08:49:51.608162 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:51 crc kubenswrapper[4932]: E1125 08:49:51.608475 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:51 crc kubenswrapper[4932]: E1125 08:49:51.608609 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.690006 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.690062 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.690073 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.690089 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.690101 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.709326 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.709368 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.709379 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.709394 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.709407 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: E1125 08:49:51.727593 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:51Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.730732 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.730771 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.730787 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.730807 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.730823 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: E1125 08:49:51.745299 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:51Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.750097 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.750143 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.750159 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.750181 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.750219 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: E1125 08:49:51.771122 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:51Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.775937 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.775985 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.775996 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.776014 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.776028 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: E1125 08:49:51.795818 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:51Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.800562 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.800621 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.800638 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.800662 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.800679 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: E1125 08:49:51.819885 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:51Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:51 crc kubenswrapper[4932]: E1125 08:49:51.820109 4932 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.822485 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
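Every status-patch failure in this stretch has the same root cause: the admission webhook node.network-node-identity.openshift.io at https://127.0.0.1:9743/node is serving a certificate that expired on 2025-08-24T17:21:41Z while the node clock reads 2025-11-25T08:49:51Z, so the API server rejects each PATCH and the kubelet eventually gives up ("update node status exceeds retry count"). As an illustration only (this helper is hypothetical, not kubelet code), a short Python sketch can pull the two timestamps out of such an error line and report how far past notAfter the clock is:

```python
import re
from datetime import datetime, timezone

# Hypothetical helper (not kubelet code): given a kubelet x509 verification
# error, extract "current time X is after Y" and report how long the
# webhook's serving certificate has been expired.
STAMP = r"(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)"
PATTERN = re.compile(r"current time " + STAMP + r" is after " + STAMP)

def expiry_lag(log_line: str):
    m = PATTERN.search(log_line)
    if m is None:
        return None
    fmt = "%Y-%m-%dT%H:%M:%SZ"
    now, not_after = (datetime.strptime(s, fmt).replace(tzinfo=timezone.utc)
                      for s in m.groups())
    return now - not_after

line = ('tls: failed to verify certificate: x509: certificate has expired '
        'or is not yet valid: current time 2025-11-25T08:49:51Z is after '
        '2025-08-24T17:21:41Z')
print(expiry_lag(line))  # -> 92 days, 15:28:10
```

On this node that puts the webhook certificate roughly three months past expiry, so the retry loop below cannot succeed until the certificate is rotated.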
event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.822546 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.822568 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.822591 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.822608 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.925742 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.925782 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.925794 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.925812 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:51 crc kubenswrapper[4932]: I1125 08:49:51.925825 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:51Z","lastTransitionTime":"2025-11-25T08:49:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.030488 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.030901 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.030918 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.030931 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.030941 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:52Z","lastTransitionTime":"2025-11-25T08:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.134402 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.134440 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.134451 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.134465 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.134473 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:52Z","lastTransitionTime":"2025-11-25T08:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.237816 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.237886 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.237909 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.237941 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.237964 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:52Z","lastTransitionTime":"2025-11-25T08:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.240540 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:52 crc kubenswrapper[4932]: E1125 08:49:52.240806 4932 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:49:52 crc kubenswrapper[4932]: E1125 08:49:52.240915 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs podName:58f40128-d3fc-4588-ad8f-8cf129079911 nodeName:}" failed. No retries permitted until 2025-11-25 08:50:08.240884526 +0000 UTC m=+68.366914129 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs") pod "network-metrics-daemon-fvbqs" (UID: "58f40128-d3fc-4588-ad8f-8cf129079911") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.340938 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.341016 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.341033 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.341450 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.341506 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:52Z","lastTransitionTime":"2025-11-25T08:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.444320 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.444379 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.444403 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.444435 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:52 crc kubenswrapper[4932]: I1125 08:49:52.444459 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:52Z","lastTransitionTime":"2025-11-25T08:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.372450 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.372504 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.372522 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.372546 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.372568 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:53Z","lastTransitionTime":"2025-11-25T08:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
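
The setters.go:603 entries above repeat one payload: the node's Ready condition pinned to False because no CNI configuration file exists yet. The condition={...} value is plain JSON, so it can be lifted out of the log and inspected mechanically. A minimal, self-contained Go sketch, using a hand-written struct that mirrors only the fields visible in the payload (upstream this is v1.NodeCondition from k8s.io/api/core/v1, an assumption not shown in this log):

package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of the fields visible in the logged condition payload.
type NodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied from one of the setters.go:603 entries above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:52Z","lastTransitionTime":"2025-11-25T08:49:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
	var c NodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("node Ready=%s reason=%s\n", c.Status, c.Reason)
}
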
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.453297 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.453448 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.453507 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.453559 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.453650 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.453822 4932 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.453922 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:50:25.45389432 +0000 UTC m=+85.579923923 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.454307 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:50:25.454285651 +0000 UTC m=+85.580315254 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.454451 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.454488 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.454512 4932 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.454570 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 08:50:25.454550977 +0000 UTC m=+85.580580580 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.455162 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.455232 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.455253 4932 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.455358 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 08:50:25.455293637 +0000 UTC m=+85.581323230 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
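
Every nestedpendingoperations.go:348 entry above defers the next attempt with durationBeforeRetry 32s: the kubelet backs off failed volume operations exponentially. A minimal sketch of that doubling schedule; the initial delay of 500ms, factor of 2, and cap of 2m2s are assumptions taken from the upstream kubelet exponential-backoff code, not from this log, which only shows the 32s step:

package main

import (
	"fmt"
	"time"
)

// Assumed constants; only the 32s step is directly visible above.
const (
	initialDelay = 500 * time.Millisecond
	factor       = 2
	maxDelay     = 2*time.Minute + 2*time.Second
)

func main() {
	d := initialDelay
	for attempt := 1; attempt <= 10; attempt++ {
		fmt.Printf("failure %2d: durationBeforeRetry %v\n", attempt, d)
		d *= factor
		if d > maxDelay {
			d = maxDelay // later retries stay pinned at the cap
		}
	}
}

On these assumed constants, the 32s wait recorded here would correspond to roughly the seventh consecutive failure of the same mount operation.
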
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.455487 4932 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.455604 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:50:25.455580754 +0000 UTC m=+85.581610357 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.475741 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.475789 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.475801 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.475815 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.475825 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:53Z","lastTransitionTime":"2025-11-25T08:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.579029 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.579086 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.579102 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.579128 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.579147 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:53Z","lastTransitionTime":"2025-11-25T08:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.605748 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.605922 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.606178 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.606405 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.606953 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.607022 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs"
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.607133 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 08:49:53 crc kubenswrapper[4932]: E1125 08:49:53.607341 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.682642 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.682694 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.682707 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.682724 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.682735 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:53Z","lastTransitionTime":"2025-11-25T08:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.785731 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.785790 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.785808 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.785878 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:53 crc kubenswrapper[4932]: I1125 08:49:53.785897 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:53Z","lastTransitionTime":"2025-11-25T08:49:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:54 crc kubenswrapper[4932]: I1125 08:49:54.506681 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:54 crc kubenswrapper[4932]: I1125 08:49:54.506719 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:54 crc kubenswrapper[4932]: I1125 08:49:54.506732 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:54 crc kubenswrapper[4932]: I1125 08:49:54.506748 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:54 crc kubenswrapper[4932]: I1125 08:49:54.506758 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:54Z","lastTransitionTime":"2025-11-25T08:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:54 crc kubenswrapper[4932]: I1125 08:49:54.606556 4932 scope.go:117] "RemoveContainer" containerID="b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc"
Nov 25 08:49:54 crc kubenswrapper[4932]: I1125 08:49:54.919610 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:54 crc kubenswrapper[4932]: I1125 08:49:54.919658 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:54 crc kubenswrapper[4932]: I1125 08:49:54.919673 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:54 crc kubenswrapper[4932]: I1125 08:49:54.919694 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:54 crc kubenswrapper[4932]: I1125 08:49:54.919711 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:54Z","lastTransitionTime":"2025-11-25T08:49:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.000952 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/1.log"
Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.003912 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997"}
Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.004376 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.022718 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.022770 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.022782 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.022800 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.022812 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:55Z","lastTransitionTime":"2025-11-25T08:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.028388 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.056818 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z"
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.071698 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.091631 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.103768 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z"
Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.116267 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z"
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.124280 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.124306 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.124314 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.124327 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.124337 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:55Z","lastTransitionTime":"2025-11-25T08:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.136497 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.151902 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74
020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.165816 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 
08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.188045 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee616
4bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:39Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI1125 08:49:39.103135 6402 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI1125 08:49:39.103160 6402 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1125 08:49:39.103261 6402 factory.go:1336] Added *v1.Node event handler 7\\\\nI1125 08:49:39.103323 6402 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1125 08:49:39.103623 6402 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 08:49:39.103650 6402 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 08:49:39.103662 6402 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1125 08:49:39.103687 6402 factory.go:656] Stopping watch factory\\\\nI1125 08:49:39.103696 6402 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 08:49:39.103719 6402 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 08:49:39.103791 6402 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1125 08:49:39.103843 6402 ovnkube.go:599] Stopped ovnkube\\\\nI1125 08:49:39.103869 6402 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 08:49:39.103969 6402 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.203383 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.215208 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.226367 4932 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.226399 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.226407 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.226421 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.226430 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:55Z","lastTransitionTime":"2025-11-25T08:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.231057 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.241987 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.259152 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.276458 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.288245 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.332817 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.332887 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.332908 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.332932 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.332956 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:55Z","lastTransitionTime":"2025-11-25T08:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.435800 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.435852 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.435864 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.435881 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.435893 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:55Z","lastTransitionTime":"2025-11-25T08:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.538797 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.538841 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.538852 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.538878 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.538889 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:55Z","lastTransitionTime":"2025-11-25T08:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.591750 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.604897 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.604950 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.604976 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.604996 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:55 crc kubenswrapper[4932]: E1125 08:49:55.605010 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.605021 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:55 crc kubenswrapper[4932]: E1125 08:49:55.605150 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:49:55 crc kubenswrapper[4932]: E1125 08:49:55.605293 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:55 crc kubenswrapper[4932]: E1125 08:49:55.605337 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.612583 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.627663 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.641560 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.641595 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.641605 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.641620 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.641632 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:55Z","lastTransitionTime":"2025-11-25T08:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.648280 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb
19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.663098 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.674372 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.686704 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.699771 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.712511 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.731210 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c8
34c1075febfca0f1507c8997\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:39Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI1125 08:49:39.103135 6402 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI1125 08:49:39.103160 6402 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1125 08:49:39.103261 6402 factory.go:1336] Added *v1.Node event handler 7\\\\nI1125 08:49:39.103323 6402 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1125 08:49:39.103623 6402 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 08:49:39.103650 6402 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 08:49:39.103662 6402 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1125 08:49:39.103687 6402 factory.go:656] Stopping watch factory\\\\nI1125 08:49:39.103696 6402 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 08:49:39.103719 6402 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 08:49:39.103791 6402 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1125 08:49:39.103843 6402 ovnkube.go:599] Stopped ovnkube\\\\nI1125 08:49:39.103869 6402 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 08:49:39.103969 6402 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.744311 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.744346 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.744357 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.744374 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.744385 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:55Z","lastTransitionTime":"2025-11-25T08:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.746020 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.759533 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc 
kubenswrapper[4932]: I1125 08:49:55.773020 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.784294 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.796682 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.816451 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.832743 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.842090 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:55Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.846700 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.846727 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.846736 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.846749 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.846758 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:55Z","lastTransitionTime":"2025-11-25T08:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.950009 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.950039 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.950048 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.950061 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:55 crc kubenswrapper[4932]: I1125 08:49:55.950070 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:55Z","lastTransitionTime":"2025-11-25T08:49:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.010080 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/2.log" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.011032 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/1.log" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.015902 4932 generic.go:334] "Generic (PLEG): container finished" podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997" exitCode=1 Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.015950 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997"} Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.016035 4932 scope.go:117] "RemoveContainer" containerID="b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.017098 4932 scope.go:117] "RemoveContainer" containerID="2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997" Nov 25 08:49:56 crc kubenswrapper[4932]: E1125 08:49:56.017441 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.043697 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"096ad388-b4d5-4cb8-8c62-349ea9c4334d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e8f66d3dfcfc67f90083e6611e327c729d82111f820986cad673a82792d5d80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bda5bbf2bfe9fb4f5ff03e336f536c431fe2c89ae303bdb6afcb0bbb04b87641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d3939adbaf71715989609ee9a73b99541968cd43b3bf1af3cfe6a8d2e1c9b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.052871 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.052919 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.052940 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.052968 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.052989 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:56Z","lastTransitionTime":"2025-11-25T08:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.076938 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b39e280313b6b76bf4f993f95e686a63327ad2eea3cd90e74ae4361acb5d07dc\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:39Z\\\",\\\"message\\\":\\\"712973235162149816) with []\\\\nI1125 08:49:39.103135 6402 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI1125 08:49:39.103160 6402 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI1125 08:49:39.103261 6402 factory.go:1336] Added *v1.Node event handler 7\\\\nI1125 08:49:39.103323 6402 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI1125 08:49:39.103623 6402 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 08:49:39.103650 6402 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 08:49:39.103662 6402 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1125 08:49:39.103687 6402 factory.go:656] Stopping watch factory\\\\nI1125 08:49:39.103696 6402 handler.go:208] Removed *v1.Node event handler 2\\\\nI1125 08:49:39.103719 6402 handler.go:208] Removed *v1.Node event handler 7\\\\nI1125 08:49:39.103791 6402 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1125 08:49:39.103843 6402 ovnkube.go:599] Stopped ovnkube\\\\nI1125 08:49:39.103869 6402 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1125 08:49:39.103969 6402 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"message\\\":\\\"_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 08:49:55.527427 6606 port_cache.go:96] port-cache(openshift-network-diagnostics_network-check-target-xd92c): added port \\\\u0026{name:openshift-network-diagnostics_network-check-target-xd92c uuid:61897e97-c771-4738-8709-09636387cb00 logicalSwitch:crc ips:[0xc009e9b0b0] mac:[10 88 10 217 0 4] expires:{wall:0 ext:0 loc:\\\\u003cnil\\\\u003e}} with IP: [10.217.0.4/23] and MAC: 0a:58:0a:d9:00:04\\\\nF1125 08:49:55.527433 6606 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to 
ca\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.092865 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnl
y\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.105398 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.120065 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.133873 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.147805 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.155283 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.155448 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.155568 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.155655 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.155731 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:56Z","lastTransitionTime":"2025-11-25T08:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.168703 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1
74f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.181369 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-d
ev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.199049 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8de
a2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/
os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode
\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.213410 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.228108 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.247283 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.257742 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.257783 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.257799 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.257828 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.257845 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:56Z","lastTransitionTime":"2025-11-25T08:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.269733 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.286547 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74
020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.300944 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.313303 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.327884 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:56Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.360691 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.360728 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.360739 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.360754 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.360765 4932 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:56Z","lastTransitionTime":"2025-11-25T08:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.463231 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.463296 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.463313 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.463337 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.463355 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:56Z","lastTransitionTime":"2025-11-25T08:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.566801 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.566880 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.566904 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.566928 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.566945 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:56Z","lastTransitionTime":"2025-11-25T08:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.669604 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.669645 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.669656 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.669671 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.669682 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:56Z","lastTransitionTime":"2025-11-25T08:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.772800 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.772836 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.772844 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.772857 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.772865 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:56Z","lastTransitionTime":"2025-11-25T08:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.875707 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.875755 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.875763 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.875776 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.875785 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:56Z","lastTransitionTime":"2025-11-25T08:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.977618 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.977709 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.977723 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.977739 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:56 crc kubenswrapper[4932]: I1125 08:49:56.977750 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:56Z","lastTransitionTime":"2025-11-25T08:49:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.020410 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/2.log" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.023954 4932 scope.go:117] "RemoveContainer" containerID="2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997" Nov 25 08:49:57 crc kubenswrapper[4932]: E1125 08:49:57.024245 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.036777 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.048237 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.061370 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.071590 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.079328 4932 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.079354 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.079362 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.079375 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.079384 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:57Z","lastTransitionTime":"2025-11-25T08:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.086266 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.098844 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.110441 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.133463 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.151703 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74
020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.167071 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.181013 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.182605 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.182661 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.182678 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.182699 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.182713 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:57Z","lastTransitionTime":"2025-11-25T08:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.194555 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.206363 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.217775 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.234290 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.246010 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"096ad388-b4d5-4cb8-8c62-349ea9c4334d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e8f66d3dfcfc67f90083e6611e327c729d82111f820986cad673a82792d5d80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bda5bbf2bfe9fb4f5ff03e336f536c431fe2c89ae303bdb6afcb0bbb04b87641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d3939adbaf71715989609ee9a73b99541968cd43b3bf1af3cfe6a8d2e1c9b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a938
0066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.276968 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c8
34c1075febfca0f1507c8997\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"message\\\":\\\"_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 08:49:55.527427 6606 port_cache.go:96] port-cache(openshift-network-diagnostics_network-check-target-xd92c): added port \\\\u0026{name:openshift-network-diagnostics_network-check-target-xd92c uuid:61897e97-c771-4738-8709-09636387cb00 logicalSwitch:crc ips:[0xc009e9b0b0] mac:[10 88 10 217 0 4] expires:{wall:0 ext:0 loc:\\\\u003cnil\\\\u003e}} with IP: [10.217.0.4/23] and MAC: 0a:58:0a:d9:00:04\\\\nF1125 08:49:55.527433 6606 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to ca\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.285693 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.286105 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.286339 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.286573 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.286772 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:57Z","lastTransitionTime":"2025-11-25T08:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.297864 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:49:57Z is after 2025-08-24T17:21:41Z" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.393280 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.393807 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.394045 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.394441 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.394637 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:57Z","lastTransitionTime":"2025-11-25T08:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.497881 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.497928 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.497945 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.497967 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.497983 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:57Z","lastTransitionTime":"2025-11-25T08:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.601600 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.601642 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.601656 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.601675 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.601689 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:57Z","lastTransitionTime":"2025-11-25T08:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.604898 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.604945 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.605063 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:57 crc kubenswrapper[4932]: E1125 08:49:57.605052 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.605130 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:57 crc kubenswrapper[4932]: E1125 08:49:57.605449 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:57 crc kubenswrapper[4932]: E1125 08:49:57.605444 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:49:57 crc kubenswrapper[4932]: E1125 08:49:57.605586 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.704428 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.704489 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.704506 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.704529 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.704545 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:57Z","lastTransitionTime":"2025-11-25T08:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.806832 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.806878 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.806889 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.806903 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.806912 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:57Z","lastTransitionTime":"2025-11-25T08:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.909498 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.909555 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.909571 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.909595 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:57 crc kubenswrapper[4932]: I1125 08:49:57.909613 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:57Z","lastTransitionTime":"2025-11-25T08:49:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.011459 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.011486 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.011495 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.011508 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.011516 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:58Z","lastTransitionTime":"2025-11-25T08:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.114076 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.114136 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.114155 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.114179 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.114235 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:58Z","lastTransitionTime":"2025-11-25T08:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.217085 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.217139 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.217154 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.217174 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.217275 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:58Z","lastTransitionTime":"2025-11-25T08:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.320475 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.320516 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.320526 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.320541 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.320549 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:58Z","lastTransitionTime":"2025-11-25T08:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.423555 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.423624 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.423646 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.423674 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.423694 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:58Z","lastTransitionTime":"2025-11-25T08:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.526369 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.526401 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.526411 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.526425 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.526435 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:58Z","lastTransitionTime":"2025-11-25T08:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.629498 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.629588 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.629613 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.629647 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.629671 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:58Z","lastTransitionTime":"2025-11-25T08:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.733000 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.733050 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.733061 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.733077 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.733091 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:58Z","lastTransitionTime":"2025-11-25T08:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.835907 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.835949 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.835957 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.835970 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.835979 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:58Z","lastTransitionTime":"2025-11-25T08:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.938951 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.939029 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.939050 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.939081 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:58 crc kubenswrapper[4932]: I1125 08:49:58.939102 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:58Z","lastTransitionTime":"2025-11-25T08:49:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.041564 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.041606 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.041620 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.041636 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.041646 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:59Z","lastTransitionTime":"2025-11-25T08:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.144714 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.144993 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.145154 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.145255 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.145345 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:59Z","lastTransitionTime":"2025-11-25T08:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.248885 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.248951 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.249021 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.249053 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.249075 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:59Z","lastTransitionTime":"2025-11-25T08:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.352501 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.352745 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.352835 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.352914 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.353040 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:59Z","lastTransitionTime":"2025-11-25T08:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.456223 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.456276 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.456293 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.456315 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.456331 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:59Z","lastTransitionTime":"2025-11-25T08:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.559164 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.559669 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.559896 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.560091 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.560330 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:59Z","lastTransitionTime":"2025-11-25T08:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.605979 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.606095 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.606095 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.606173 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:49:59 crc kubenswrapper[4932]: E1125 08:49:59.606778 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:49:59 crc kubenswrapper[4932]: E1125 08:49:59.606867 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:49:59 crc kubenswrapper[4932]: E1125 08:49:59.606704 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:49:59 crc kubenswrapper[4932]: E1125 08:49:59.607144 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.663354 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.663427 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.663450 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.663477 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.663499 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:59Z","lastTransitionTime":"2025-11-25T08:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.766556 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.766628 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.766651 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.766676 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.766693 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:59Z","lastTransitionTime":"2025-11-25T08:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.869379 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.869445 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.869464 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.869486 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.869507 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:59Z","lastTransitionTime":"2025-11-25T08:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.972544 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.972633 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.972651 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.972674 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:49:59 crc kubenswrapper[4932]: I1125 08:49:59.972693 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:49:59Z","lastTransitionTime":"2025-11-25T08:49:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.076178 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.076276 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.076294 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.076317 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.076336 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:00Z","lastTransitionTime":"2025-11-25T08:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.179563 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.179663 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.179689 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.179722 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.179744 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:00Z","lastTransitionTime":"2025-11-25T08:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.282974 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.283022 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.283035 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.283053 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.283069 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:00Z","lastTransitionTime":"2025-11-25T08:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.386590 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.386680 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.386705 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.386733 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.386756 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:00Z","lastTransitionTime":"2025-11-25T08:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.490612 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.490671 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.490689 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.490711 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.491043 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:00Z","lastTransitionTime":"2025-11-25T08:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.594372 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.594904 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.595050 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.595205 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.595332 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:00Z","lastTransitionTime":"2025-11-25T08:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.621298 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.635888 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.650390 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.663335 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.684265 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.698105 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.698209 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.698244 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.698258 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.698278 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.698291 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:00Z","lastTransitionTime":"2025-11-25T08:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.712407 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.736023 4932 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]
},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cr
i-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.754070 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.767268 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.785602 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.801069 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.801112 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.801125 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.801140 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.801151 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:00Z","lastTransitionTime":"2025-11-25T08:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.803493 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.822248 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.834301 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.849582 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\"
:\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.865230 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"096ad388-b4d5-4cb8-8c62-349ea9c4334d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e8f66d3dfcfc67f90083e6611e327c729d82111f820986cad673a82792d5d80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bda5bbf2bfe9fb4f5ff03e336f536c431fe2c89ae303bdb6afcb0bbb04b87641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"
name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d3939adbaf71715989609ee9a73b99541968cd43b3bf1af3cfe6a8d2e1c9b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.896754 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"message\\\":\\\"_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 08:49:55.527427 6606 port_cache.go:96] port-cache(openshift-network-diagnostics_network-check-target-xd92c): added port \\\\u0026{name:openshift-network-diagnostics_network-check-target-xd92c uuid:61897e97-c771-4738-8709-09636387cb00 logicalSwitch:crc ips:[0xc009e9b0b0] mac:[10 88 10 217 0 4] expires:{wall:0 ext:0 loc:\\\\u003cnil\\\\u003e}} with IP: [10.217.0.4/23] and MAC: 0a:58:0a:d9:00:04\\\\nF1125 08:49:55.527433 6606 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to ca\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.903624 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.903676 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.903695 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.903719 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.903737 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:00Z","lastTransitionTime":"2025-11-25T08:50:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:00 crc kubenswrapper[4932]: I1125 08:50:00.917450 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:00Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.006866 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.006922 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.006945 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.006973 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.006995 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:01Z","lastTransitionTime":"2025-11-25T08:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.110080 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.110161 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.110221 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.110255 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.110280 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:01Z","lastTransitionTime":"2025-11-25T08:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.212460 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.212488 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.212497 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.212513 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.212522 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:01Z","lastTransitionTime":"2025-11-25T08:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.315514 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.315573 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.315589 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.315615 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.315631 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:01Z","lastTransitionTime":"2025-11-25T08:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.418864 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.418912 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.418923 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.418939 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.418950 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:01Z","lastTransitionTime":"2025-11-25T08:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.525364 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.525408 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.525418 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.525433 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.525447 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:01Z","lastTransitionTime":"2025-11-25T08:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.605632 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:01 crc kubenswrapper[4932]: E1125 08:50:01.606041 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.605786 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:01 crc kubenswrapper[4932]: E1125 08:50:01.606539 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.605767 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:01 crc kubenswrapper[4932]: E1125 08:50:01.606907 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.605835 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:01 crc kubenswrapper[4932]: E1125 08:50:01.607301 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.627701 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.627758 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.627769 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.627786 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.627798 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:01Z","lastTransitionTime":"2025-11-25T08:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.731028 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.731067 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.731078 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.731092 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.731101 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:01Z","lastTransitionTime":"2025-11-25T08:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.834412 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.834464 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.834480 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.834499 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.834512 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:01Z","lastTransitionTime":"2025-11-25T08:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.937388 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.937457 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.937477 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.937504 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:01 crc kubenswrapper[4932]: I1125 08:50:01.937524 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:01Z","lastTransitionTime":"2025-11-25T08:50:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.038899 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.038919 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.038926 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.038937 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.038946 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.141726 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.141790 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.141812 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.141841 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.141864 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.176169 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.176219 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.176227 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.176240 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.176248 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: E1125 08:50:02.192859 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:02Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.197741 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.197795 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.197807 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.197823 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.197834 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: E1125 08:50:02.217121 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:02Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.223227 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.223253 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.223261 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.223276 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.223297 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: E1125 08:50:02.245303 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:02Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.250906 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.250957 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.250973 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.250994 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.251012 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: E1125 08:50:02.272239 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:02Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.277280 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.277343 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.277361 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.277387 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.277406 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: E1125 08:50:02.293071 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:02Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:02 crc kubenswrapper[4932]: E1125 08:50:02.293425 4932 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.294985 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
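The block above shows a single root cause retried to exhaustion: the kubelet cannot patch its own Node status because the node.network-node-identity.openshift.io admission webhook at 127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z, months before the node's current clock (2025-11-25). Below is a minimal Go sketch for confirming the certificate window from the node. The endpoint address is taken from the log; the program itself is an illustrative diagnostic only (not part of the kubelet or the webhook) and assumes the TLS handshake completes, e.g. that the server accepts a client without a client certificate.

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Endpoint taken from the webhook error in the log above.
	const addr = "127.0.0.1:9743"

	// InsecureSkipVerify lets the handshake complete even though the chain
	// no longer verifies, so the expired leaf certificate can be inspected.
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial %s: %v", addr, err)
	}
	defer conn.Close()

	leaf := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("subject:    %s\n", leaf.Subject)
	fmt.Printf("not before: %s\n", leaf.NotBefore.Format(time.RFC3339))
	fmt.Printf("not after:  %s\n", leaf.NotAfter.Format(time.RFC3339))
	if now := time.Now(); now.After(leaf.NotAfter) {
		// Mirrors the x509 error the kubelet logs: "current time ... is after ...".
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), leaf.NotAfter.Format(time.RFC3339))
	}
}

The same validity dates can be read with openssl s_client -connect 127.0.0.1:9743; on a CRC cluster a window like this is commonly the result of starting a bundle or snapshot long after its embedded certificates were issued, and node status updates cannot succeed until the certificates are renewed.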
event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.295032 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.295048 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.295069 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.295089 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.398618 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.398674 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.398691 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.398719 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.398738 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.501751 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.501812 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.501833 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.501860 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.501877 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.605677 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.605723 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.605742 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.605764 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.605783 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.708787 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.708814 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.708822 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.708836 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.708846 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.812267 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.812304 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.812313 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.812329 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.812338 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.915170 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.915238 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.915254 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.915274 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:02 crc kubenswrapper[4932]: I1125 08:50:02.915288 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:02Z","lastTransitionTime":"2025-11-25T08:50:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.017920 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.017971 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.017989 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.018011 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.018025 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:03Z","lastTransitionTime":"2025-11-25T08:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.277969 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.278025 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.278042 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.278063 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.278076 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:03Z","lastTransitionTime":"2025-11-25T08:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.382224 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.382266 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.382282 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.382304 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.382320 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:03Z","lastTransitionTime":"2025-11-25T08:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.485336 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.485377 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.485393 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.485414 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.485431 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:03Z","lastTransitionTime":"2025-11-25T08:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.587654 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.587691 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.587701 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.587715 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.587727 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:03Z","lastTransitionTime":"2025-11-25T08:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.605446 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:03 crc kubenswrapper[4932]: E1125 08:50:03.605563 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.605613 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:03 crc kubenswrapper[4932]: E1125 08:50:03.605663 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.605697 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:03 crc kubenswrapper[4932]: E1125 08:50:03.605732 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.605774 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:03 crc kubenswrapper[4932]: E1125 08:50:03.605818 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.690738 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.690771 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.690784 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.690802 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.690830 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:03Z","lastTransitionTime":"2025-11-25T08:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.793445 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.793482 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.793493 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.793510 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.793521 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:03Z","lastTransitionTime":"2025-11-25T08:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.895908 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.895949 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.895957 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.895972 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.895981 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:03Z","lastTransitionTime":"2025-11-25T08:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.998018 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.998055 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.998065 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.998080 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:03 crc kubenswrapper[4932]: I1125 08:50:03.998091 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:03Z","lastTransitionTime":"2025-11-25T08:50:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.100259 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.100299 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.100312 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.100328 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.100339 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:04Z","lastTransitionTime":"2025-11-25T08:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.202991 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.203081 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.203108 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.203138 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.203160 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:04Z","lastTransitionTime":"2025-11-25T08:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.306413 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.306472 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.306490 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.306513 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.306530 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:04Z","lastTransitionTime":"2025-11-25T08:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.408758 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.408808 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.408820 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.408838 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.408850 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:04Z","lastTransitionTime":"2025-11-25T08:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.512613 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.512694 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.512706 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.512727 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.512738 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:04Z","lastTransitionTime":"2025-11-25T08:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.616318 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.616359 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.616367 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.616381 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.616391 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:04Z","lastTransitionTime":"2025-11-25T08:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.718380 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.718424 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.718436 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.718452 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.718465 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:04Z","lastTransitionTime":"2025-11-25T08:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.821249 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.821291 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.821305 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.821322 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.821334 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:04Z","lastTransitionTime":"2025-11-25T08:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.923327 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.923367 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.923376 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.923394 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:04 crc kubenswrapper[4932]: I1125 08:50:04.923405 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:04Z","lastTransitionTime":"2025-11-25T08:50:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.026002 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.026050 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.026061 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.026079 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.026091 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:05Z","lastTransitionTime":"2025-11-25T08:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.128432 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.128472 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.128486 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.128500 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.128509 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:05Z","lastTransitionTime":"2025-11-25T08:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.231108 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.231154 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.231176 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.231228 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.231245 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:05Z","lastTransitionTime":"2025-11-25T08:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.333377 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.333415 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.333424 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.333439 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.333452 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:05Z","lastTransitionTime":"2025-11-25T08:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.435876 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.435911 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.435922 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.435936 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.435944 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:05Z","lastTransitionTime":"2025-11-25T08:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.537930 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.537968 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.537976 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.537991 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.538002 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:05Z","lastTransitionTime":"2025-11-25T08:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.604828 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:05 crc kubenswrapper[4932]: E1125 08:50:05.604950 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.605118 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:05 crc kubenswrapper[4932]: E1125 08:50:05.605177 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.605278 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:05 crc kubenswrapper[4932]: E1125 08:50:05.605363 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.605425 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:05 crc kubenswrapper[4932]: E1125 08:50:05.605596 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.639927 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.639960 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.639971 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.639986 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.639999 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:05Z","lastTransitionTime":"2025-11-25T08:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.745621 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.745688 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.745705 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.745730 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.745747 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:05Z","lastTransitionTime":"2025-11-25T08:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.847985 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.848544 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.848611 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.848677 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.848746 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:05Z","lastTransitionTime":"2025-11-25T08:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.952146 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.952201 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.952215 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.952231 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:05 crc kubenswrapper[4932]: I1125 08:50:05.952241 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:05Z","lastTransitionTime":"2025-11-25T08:50:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.055162 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.055695 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.055774 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.055848 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.055926 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:06Z","lastTransitionTime":"2025-11-25T08:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.159019 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.159062 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.159072 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.159089 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.159099 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:06Z","lastTransitionTime":"2025-11-25T08:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.261582 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.261640 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.261661 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.261689 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.261708 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:06Z","lastTransitionTime":"2025-11-25T08:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.364331 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.364383 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.364395 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.364414 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.364427 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:06Z","lastTransitionTime":"2025-11-25T08:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.466905 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.466961 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.466975 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.466996 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.467009 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:06Z","lastTransitionTime":"2025-11-25T08:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.569346 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.569391 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.569402 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.569417 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.569427 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:06Z","lastTransitionTime":"2025-11-25T08:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.672167 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.672264 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.672280 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.672304 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.672323 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:06Z","lastTransitionTime":"2025-11-25T08:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.776105 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.776233 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.776254 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.776287 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.776309 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:06Z","lastTransitionTime":"2025-11-25T08:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.878725 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.878771 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.878787 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.878804 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.878817 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:06Z","lastTransitionTime":"2025-11-25T08:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.989733 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.989785 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.989798 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.989815 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:06 crc kubenswrapper[4932]: I1125 08:50:06.989828 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:06Z","lastTransitionTime":"2025-11-25T08:50:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.092559 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.092592 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.092600 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.092612 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.092622 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:07Z","lastTransitionTime":"2025-11-25T08:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.195234 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.195283 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.195294 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.195315 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.195327 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:07Z","lastTransitionTime":"2025-11-25T08:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.297651 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.297688 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.297698 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.297714 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.297725 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:07Z","lastTransitionTime":"2025-11-25T08:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.400670 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.400704 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.400716 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.400732 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.400742 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:07Z","lastTransitionTime":"2025-11-25T08:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.503730 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.503772 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.503783 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.503798 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.503810 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:07Z","lastTransitionTime":"2025-11-25T08:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.604992 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.605036 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:07 crc kubenswrapper[4932]: E1125 08:50:07.605115 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.605003 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:07 crc kubenswrapper[4932]: E1125 08:50:07.605276 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.605312 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:07 crc kubenswrapper[4932]: E1125 08:50:07.605475 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:07 crc kubenswrapper[4932]: E1125 08:50:07.605550 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.606159 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.606202 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.606213 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.606228 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.606240 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:07Z","lastTransitionTime":"2025-11-25T08:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.709135 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.709253 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.709279 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.709310 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.709340 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:07Z","lastTransitionTime":"2025-11-25T08:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.812396 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.812456 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.812473 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.812497 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.812516 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:07Z","lastTransitionTime":"2025-11-25T08:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.915539 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.915858 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.915948 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.916056 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:07 crc kubenswrapper[4932]: I1125 08:50:07.916145 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:07Z","lastTransitionTime":"2025-11-25T08:50:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.019668 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.019719 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.019729 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.019747 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.019757 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:08Z","lastTransitionTime":"2025-11-25T08:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.121975 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.122823 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.123026 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.123235 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.123413 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:08Z","lastTransitionTime":"2025-11-25T08:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.225395 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.225432 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.225440 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.225453 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.225462 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:08Z","lastTransitionTime":"2025-11-25T08:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.322693 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:08 crc kubenswrapper[4932]: E1125 08:50:08.322876 4932 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:50:08 crc kubenswrapper[4932]: E1125 08:50:08.323283 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs podName:58f40128-d3fc-4588-ad8f-8cf129079911 nodeName:}" failed. No retries permitted until 2025-11-25 08:50:40.323260861 +0000 UTC m=+100.449290424 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs") pod "network-metrics-daemon-fvbqs" (UID: "58f40128-d3fc-4588-ad8f-8cf129079911") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.329507 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.329576 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.329595 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.329633 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.329653 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:08Z","lastTransitionTime":"2025-11-25T08:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.431450 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.431483 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.431492 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.431504 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.431513 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:08Z","lastTransitionTime":"2025-11-25T08:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.534517 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.534568 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.534577 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.534593 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.534604 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:08Z","lastTransitionTime":"2025-11-25T08:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.637142 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.637397 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.637462 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.637538 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.637631 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:08Z","lastTransitionTime":"2025-11-25T08:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.739966 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.740013 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.740024 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.740040 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.740052 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:08Z","lastTransitionTime":"2025-11-25T08:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.842058 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.842111 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.842123 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.842154 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.842166 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:08Z","lastTransitionTime":"2025-11-25T08:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.944770 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.945069 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.945164 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.945299 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:08 crc kubenswrapper[4932]: I1125 08:50:08.945415 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:08Z","lastTransitionTime":"2025-11-25T08:50:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.047606 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.047680 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.047698 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.047723 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.047768 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:09Z","lastTransitionTime":"2025-11-25T08:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.150765 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.151439 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.151463 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.151482 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.151493 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:09Z","lastTransitionTime":"2025-11-25T08:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.255139 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.255239 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.255264 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.255292 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.255318 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:09Z","lastTransitionTime":"2025-11-25T08:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.357609 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.357636 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.357645 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.357657 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.357664 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:09Z","lastTransitionTime":"2025-11-25T08:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.460831 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.460886 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.460899 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.460916 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.460931 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:09Z","lastTransitionTime":"2025-11-25T08:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.564399 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.564455 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.564463 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.564478 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.564487 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:09Z","lastTransitionTime":"2025-11-25T08:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.605724 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.605914 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.606170 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:09 crc kubenswrapper[4932]: E1125 08:50:09.606263 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:09 crc kubenswrapper[4932]: E1125 08:50:09.606357 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:09 crc kubenswrapper[4932]: E1125 08:50:09.606288 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.606388 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:09 crc kubenswrapper[4932]: E1125 08:50:09.606878 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.608955 4932 scope.go:117] "RemoveContainer" containerID="2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997" Nov 25 08:50:09 crc kubenswrapper[4932]: E1125 08:50:09.609416 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.666715 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.666748 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.666757 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.666773 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.666782 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:09Z","lastTransitionTime":"2025-11-25T08:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.769793 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.769844 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.769856 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.769875 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.769890 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:09Z","lastTransitionTime":"2025-11-25T08:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.872949 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.873029 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.873048 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.874422 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.874497 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:09Z","lastTransitionTime":"2025-11-25T08:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.977151 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.977466 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.977563 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.977667 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:09 crc kubenswrapper[4932]: I1125 08:50:09.977768 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:09Z","lastTransitionTime":"2025-11-25T08:50:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.080057 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.080134 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.080154 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.080179 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.080226 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:10Z","lastTransitionTime":"2025-11-25T08:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.184440 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.185110 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.185411 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.185505 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.185586 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:10Z","lastTransitionTime":"2025-11-25T08:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.288578 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.288637 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.288654 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.288678 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.288695 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:10Z","lastTransitionTime":"2025-11-25T08:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.391401 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.391445 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.391461 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.391483 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.391500 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:10Z","lastTransitionTime":"2025-11-25T08:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.494232 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.494276 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.494288 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.494306 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.494321 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:10Z","lastTransitionTime":"2025-11-25T08:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.597264 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.597295 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.597325 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.597340 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.597349 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:10Z","lastTransitionTime":"2025-11-25T08:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.671302 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.684124 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.696912 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.700201 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.700252 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.700265 4932 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.700285 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.700299 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:10Z","lastTransitionTime":"2025-11-25T08:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.710387 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\
\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.724481 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba
84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\
\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kub
e-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.736358 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.747890 4932 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.771615 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.785479 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74
020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.800123 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.803655 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.803684 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.803692 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.803704 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.803730 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:10Z","lastTransitionTime":"2025-11-25T08:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.814374 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.827300 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.839917 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.849424 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.860360 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.870300 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"096ad388-b4d5-4cb8-8c62-349ea9c4334d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e8f66d3dfcfc67f90083e6611e327c729d82111f820986cad673a82792d5d80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bda5bbf2bfe9fb4f5ff03e336f536c431fe2c89ae303bdb6afcb0bbb04b87641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d3939adbaf71715989609ee9a73b99541968cd43b3bf1af3cfe6a8d2e1c9b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a938
0066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.888611 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c8
34c1075febfca0f1507c8997\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"message\\\":\\\"_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 08:49:55.527427 6606 port_cache.go:96] port-cache(openshift-network-diagnostics_network-check-target-xd92c): added port \\\\u0026{name:openshift-network-diagnostics_network-check-target-xd92c uuid:61897e97-c771-4738-8709-09636387cb00 logicalSwitch:crc ips:[0xc009e9b0b0] mac:[10 88 10 217 0 4] expires:{wall:0 ext:0 loc:\\\\u003cnil\\\\u003e}} with IP: [10.217.0.4/23] and MAC: 0a:58:0a:d9:00:04\\\\nF1125 08:49:55.527433 6606 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to ca\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.902364 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:10Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.905893 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.906070 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.906140 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.906226 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:10 crc kubenswrapper[4932]: I1125 08:50:10.906299 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:10Z","lastTransitionTime":"2025-11-25T08:50:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.008508 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.008561 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.008572 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.008586 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.008596 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:11Z","lastTransitionTime":"2025-11-25T08:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.110541 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.110572 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.110580 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.110593 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.110603 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:11Z","lastTransitionTime":"2025-11-25T08:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.213054 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.213300 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.213316 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.213330 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.213339 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:11Z","lastTransitionTime":"2025-11-25T08:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.316046 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.316104 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.316117 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.316131 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.316140 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:11Z","lastTransitionTime":"2025-11-25T08:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.419033 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.419081 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.419093 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.419111 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.419124 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:11Z","lastTransitionTime":"2025-11-25T08:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.522214 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.522334 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.522362 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.522430 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.522448 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:11Z","lastTransitionTime":"2025-11-25T08:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.605064 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.605062 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:11 crc kubenswrapper[4932]: E1125 08:50:11.605334 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.605107 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.605075 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:11 crc kubenswrapper[4932]: E1125 08:50:11.605410 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:11 crc kubenswrapper[4932]: E1125 08:50:11.605525 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:11 crc kubenswrapper[4932]: E1125 08:50:11.605661 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.626074 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.626128 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.626151 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.626178 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.626229 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:11Z","lastTransitionTime":"2025-11-25T08:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.728827 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.728988 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.729009 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.729083 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.729106 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:11Z","lastTransitionTime":"2025-11-25T08:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.832061 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.832098 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.832107 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.832120 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.832129 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:11Z","lastTransitionTime":"2025-11-25T08:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.935515 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.935561 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.935569 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.935584 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:11 crc kubenswrapper[4932]: I1125 08:50:11.935593 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:11Z","lastTransitionTime":"2025-11-25T08:50:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.038425 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.038472 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.038486 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.038506 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.038519 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.141183 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.141239 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.141249 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.141263 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.141275 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.243857 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.243908 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.243920 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.243937 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.243949 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.345762 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.345813 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.345825 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.345840 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.345852 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.448339 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.448383 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.448414 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.448430 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.448441 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.491567 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.491610 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.491620 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.491637 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.491647 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: E1125 08:50:12.504669 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:12Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.507750 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.507781 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.507789 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.507801 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.507810 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: E1125 08:50:12.520446 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:12Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.524772 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.524829 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.524841 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.524857 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.524868 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: E1125 08:50:12.538120 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:12Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.541821 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.541875 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.541887 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.541904 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.541915 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: E1125 08:50:12.557882 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:12Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.562598 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.562628 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.562638 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.562652 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.562664 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: E1125 08:50:12.574830 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:12Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:12 crc kubenswrapper[4932]: E1125 08:50:12.574973 4932 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.576675 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.576731 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.576746 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.576763 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.576774 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.679585 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.679649 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.679659 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.679676 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.679688 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.782324 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.782376 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.782386 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.782401 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.782411 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.885412 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.885449 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.885461 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.885475 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.885485 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.988090 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.988162 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.988223 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.988249 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:12 crc kubenswrapper[4932]: I1125 08:50:12.988267 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:12Z","lastTransitionTime":"2025-11-25T08:50:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.090182 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.090262 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.090274 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.090291 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.090304 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:13Z","lastTransitionTime":"2025-11-25T08:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.195513 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.195550 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.195560 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.195574 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.195584 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:13Z","lastTransitionTime":"2025-11-25T08:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.297982 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.298016 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.298024 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.298037 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.298044 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:13Z","lastTransitionTime":"2025-11-25T08:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.400494 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.400527 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.400536 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.400551 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.400560 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:13Z","lastTransitionTime":"2025-11-25T08:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.502609 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.502643 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.502653 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.502669 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.502681 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:13Z","lastTransitionTime":"2025-11-25T08:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.605610 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.605723 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.605525 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.605807 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:13 crc kubenswrapper[4932]: E1125 08:50:13.605753 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.606568 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.606636 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.606661 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.606931 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.606973 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:13Z","lastTransitionTime":"2025-11-25T08:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:13 crc kubenswrapper[4932]: E1125 08:50:13.607000 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:13 crc kubenswrapper[4932]: E1125 08:50:13.607064 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:13 crc kubenswrapper[4932]: E1125 08:50:13.607430 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.709327 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.709395 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.709407 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.709423 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.709437 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:13Z","lastTransitionTime":"2025-11-25T08:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.812425 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.812471 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.812482 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.812505 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.812518 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:13Z","lastTransitionTime":"2025-11-25T08:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.914557 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.914600 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.914612 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.914628 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:13 crc kubenswrapper[4932]: I1125 08:50:13.914639 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:13Z","lastTransitionTime":"2025-11-25T08:50:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.016950 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.017043 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.017058 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.017073 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.017083 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:14Z","lastTransitionTime":"2025-11-25T08:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.075620 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kvhb4_199dbdf9-e2fc-459e-9e17-f5d520309f0a/kube-multus/0.log" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.075685 4932 generic.go:334] "Generic (PLEG): container finished" podID="199dbdf9-e2fc-459e-9e17-f5d520309f0a" containerID="2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f" exitCode=1 Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.075724 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kvhb4" event={"ID":"199dbdf9-e2fc-459e-9e17-f5d520309f0a","Type":"ContainerDied","Data":"2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f"} Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.076184 4932 scope.go:117] "RemoveContainer" containerID="2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.094133 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.114538 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.119715 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.119754 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.119765 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.119782 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.119794 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:14Z","lastTransitionTime":"2025-11-25T08:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.132751 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.152335 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.174322 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.191345 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.201225 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.211628 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.223040 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.223082 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.223096 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.223117 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.223132 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:14Z","lastTransitionTime":"2025-11-25T08:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.227360 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:14Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:50:13Z\\\",\\\"message\\\":\\\"2025-11-25T08:49:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_30165ed9-25ac-4f0f-8d58-50e38c3b0aa4\\\\n2025-11-25T08:49:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_30165ed9-25ac-4f0f-8d58-50e38c3b0aa4 to /host/opt/cni/bin/\\\\n2025-11-25T08:49:28Z [verbose] multus-daemon started\\\\n2025-11-25T08:49:28Z [verbose] Readiness Indicator file check\\\\n2025-11-25T08:50:13Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.249698 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.266051 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74
020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.278983 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.292930 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.304004 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.317521 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.325632 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.325659 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.325668 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.325681 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.325690 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:14Z","lastTransitionTime":"2025-11-25T08:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.333111 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"096ad388-b4d5-4cb8-8c62-349ea9c4334d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e8f66d3dfcfc67f90083e6611e327c729d82111f820986cad673a82792d5d80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bda5bbf2bfe9fb4f5ff03e336f536c431fe2c89ae303bdb6afcb0bbb04b87641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d3939adbaf71715989609ee9a73b99541968cd43b3bf1af3cfe6a8d2e1c9b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.358806 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c8
34c1075febfca0f1507c8997\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"message\\\":\\\"_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 08:49:55.527427 6606 port_cache.go:96] port-cache(openshift-network-diagnostics_network-check-target-xd92c): added port \\\\u0026{name:openshift-network-diagnostics_network-check-target-xd92c uuid:61897e97-c771-4738-8709-09636387cb00 logicalSwitch:crc ips:[0xc009e9b0b0] mac:[10 88 10 217 0 4] expires:{wall:0 ext:0 loc:\\\\u003cnil\\\\u003e}} with IP: [10.217.0.4/23] and MAC: 0a:58:0a:d9:00:04\\\\nF1125 08:49:55.527433 6606 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to ca\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.373326 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:14Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.427769 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.427826 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.427836 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.427852 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.427862 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:14Z","lastTransitionTime":"2025-11-25T08:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.531386 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.531449 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.531471 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.531502 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.531523 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:14Z","lastTransitionTime":"2025-11-25T08:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.634154 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.634236 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.634249 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.634265 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.634294 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:14Z","lastTransitionTime":"2025-11-25T08:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.737137 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.737406 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.737418 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.737434 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.737446 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:14Z","lastTransitionTime":"2025-11-25T08:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.842201 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.842260 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.842271 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.842286 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.842299 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:14Z","lastTransitionTime":"2025-11-25T08:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.945747 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.945787 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.945795 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.945809 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:14 crc kubenswrapper[4932]: I1125 08:50:14.945822 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:14Z","lastTransitionTime":"2025-11-25T08:50:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.048638 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.048711 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.048727 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.048746 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.048760 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:15Z","lastTransitionTime":"2025-11-25T08:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.080955 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kvhb4_199dbdf9-e2fc-459e-9e17-f5d520309f0a/kube-multus/0.log" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.081068 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kvhb4" event={"ID":"199dbdf9-e2fc-459e-9e17-f5d520309f0a","Type":"ContainerStarted","Data":"154dcd3feae41470aa678f3bfdfae9a5a4af769b14c800d21e37351835697115"} Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.112060 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c8
34c1075febfca0f1507c8997\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"message\\\":\\\"_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 08:49:55.527427 6606 port_cache.go:96] port-cache(openshift-network-diagnostics_network-check-target-xd92c): added port \\\\u0026{name:openshift-network-diagnostics_network-check-target-xd92c uuid:61897e97-c771-4738-8709-09636387cb00 logicalSwitch:crc ips:[0xc009e9b0b0] mac:[10 88 10 217 0 4] expires:{wall:0 ext:0 loc:\\\\u003cnil\\\\u003e}} with IP: [10.217.0.4/23] and MAC: 0a:58:0a:d9:00:04\\\\nF1125 08:49:55.527433 6606 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to ca\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.129070 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.142589 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"096ad388-b4d5-4cb8-8c62-349ea9c4334d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e8f66d3dfcfc67f90083e6611e327c729d82111f820986cad673a82792d5d80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bda5bbf2bfe9fb4f5ff03e336f536c431fe2c89ae303bdb6afcb0bbb04b87641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d3939adbaf71715989609ee9a73b99541968cd43b3bf1af3cfe6a8d2e1c9b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.151838 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.151882 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.151894 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.151911 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.151943 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:15Z","lastTransitionTime":"2025-11-25T08:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.162566 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.181266 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.196519 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.213500 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.223984 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.234421 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.246460 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.254954 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.255042 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.255058 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.255078 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.255091 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:15Z","lastTransitionTime":"2025-11-25T08:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.258812 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.269811 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.289335 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.299609 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.309249 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.317948 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.328136 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://154dcd3feae41470aa678f3bfdfae9a5a4af769b14c800d21e37351835697115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:50:13Z\\\",\\\"message\\\":\\\"2025-11-25T08:49:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_30165ed9-25ac-4f0f-8d58-50e38c3b0aa4\\\\n2025-11-25T08:49:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_30165ed9-25ac-4f0f-8d58-50e38c3b0aa4 to /host/opt/cni/bin/\\\\n2025-11-25T08:49:28Z [verbose] multus-daemon started\\\\n2025-11-25T08:49:28Z [verbose] Readiness Indicator file check\\\\n2025-11-25T08:50:13Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:50:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.344941 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:15Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.357666 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.357721 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.357735 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.357754 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.357766 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:15Z","lastTransitionTime":"2025-11-25T08:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.459738 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.459793 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.459806 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.459825 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.459840 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:15Z","lastTransitionTime":"2025-11-25T08:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.563316 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.563396 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.563407 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.563422 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.563431 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:15Z","lastTransitionTime":"2025-11-25T08:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.605279 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.605293 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.605341 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.605432 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:15 crc kubenswrapper[4932]: E1125 08:50:15.605545 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:15 crc kubenswrapper[4932]: E1125 08:50:15.605693 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:15 crc kubenswrapper[4932]: E1125 08:50:15.605750 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:15 crc kubenswrapper[4932]: E1125 08:50:15.605798 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.665921 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.665972 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.665988 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.666008 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.666021 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:15Z","lastTransitionTime":"2025-11-25T08:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.768372 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.768417 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.768431 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.768451 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.768466 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:15Z","lastTransitionTime":"2025-11-25T08:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.871236 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.871294 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.871312 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.871334 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.871352 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:15Z","lastTransitionTime":"2025-11-25T08:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.973684 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.973968 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.974038 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.974108 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:15 crc kubenswrapper[4932]: I1125 08:50:15.974180 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:15Z","lastTransitionTime":"2025-11-25T08:50:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.077737 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.078122 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.078301 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.078395 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.078470 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:16Z","lastTransitionTime":"2025-11-25T08:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.182425 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.182493 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.182510 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.182534 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.182554 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:16Z","lastTransitionTime":"2025-11-25T08:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.285768 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.285852 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.285872 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.285910 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.285946 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:16Z","lastTransitionTime":"2025-11-25T08:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.388777 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.388846 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.388863 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.388891 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.388909 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:16Z","lastTransitionTime":"2025-11-25T08:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.492394 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.492482 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.492502 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.492531 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.492556 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:16Z","lastTransitionTime":"2025-11-25T08:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.599107 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.599610 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.600105 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.600154 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.600174 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:16Z","lastTransitionTime":"2025-11-25T08:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.703948 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.704002 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.704018 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.704042 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.704059 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:16Z","lastTransitionTime":"2025-11-25T08:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.808138 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.808250 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.808269 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.808295 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.808317 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:16Z","lastTransitionTime":"2025-11-25T08:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.911991 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.912106 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.912133 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.912167 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:16 crc kubenswrapper[4932]: I1125 08:50:16.912238 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:16Z","lastTransitionTime":"2025-11-25T08:50:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.015361 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.015423 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.015445 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.015473 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.015496 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:17Z","lastTransitionTime":"2025-11-25T08:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.605856 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.605919 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.605864 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs"
Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.605864 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:50:17 crc kubenswrapper[4932]: E1125 08:50:17.606121 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 08:50:17 crc kubenswrapper[4932]: E1125 08:50:17.606375 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911"
Nov 25 08:50:17 crc kubenswrapper[4932]: E1125 08:50:17.606489 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 08:50:17 crc kubenswrapper[4932]: E1125 08:50:17.606618 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
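The NodeNotReady/KubeletNotReady condition above is driven by the kubelet's runtime network readiness check: it stays false for as long as /etc/kubernetes/cni/net.d/ contains no CNI configuration file, and every pod that needs a new sandbox fails to sync with the same error. A self-contained sketch of an equivalent check, in Go (illustrative only, not kubelet source; the directory path is the one named in the log):

    // cnicheck.go - minimal sketch of a CNI-config presence check
    // (assumption: standalone illustration, not the kubelet's own code).
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
        entries, err := os.ReadDir(confDir)
        if err != nil {
            fmt.Printf("cannot read %s: %v\n", confDir, err)
            return
        }
        var confs []string
        for _, e := range entries {
            // extensions commonly accepted by CNI config loaders
            switch filepath.Ext(e.Name()) {
            case ".conf", ".conflist", ".json":
                confs = append(confs, e.Name())
            }
        }
        if len(confs) == 0 {
            fmt.Println("no CNI configuration file found: network plugin not ready")
            return
        }
        fmt.Printf("found CNI config(s): %v\n", confs)
    }

Until something (here, the ovnkube-controller container that is crash-looping further down) writes a config into that directory, the check keeps failing and the status cycle below keeps repeating.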
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.633929 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.633994 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.634015 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.634250 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.634305 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:17Z","lastTransitionTime":"2025-11-25T08:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.737717 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.737792 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.737810 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.737835 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.737853 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:17Z","lastTransitionTime":"2025-11-25T08:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.840499 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.840559 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.840574 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.840591 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.840601 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:17Z","lastTransitionTime":"2025-11-25T08:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.942684 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.942716 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.942724 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.942736 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:17 crc kubenswrapper[4932]: I1125 08:50:17.942744 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:17Z","lastTransitionTime":"2025-11-25T08:50:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.045368 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.045435 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.045448 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.045464 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.045473 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:18Z","lastTransitionTime":"2025-11-25T08:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.147684 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.147745 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.147764 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.147789 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.147807 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:18Z","lastTransitionTime":"2025-11-25T08:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.251255 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.251325 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.251347 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.251377 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.251396 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:18Z","lastTransitionTime":"2025-11-25T08:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.353752 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.353800 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.353814 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.353829 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.353841 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:18Z","lastTransitionTime":"2025-11-25T08:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.456350 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.456401 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.456413 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.456430 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.456445 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:18Z","lastTransitionTime":"2025-11-25T08:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.559252 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.559316 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.559333 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.559362 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.559379 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:18Z","lastTransitionTime":"2025-11-25T08:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.662554 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.662639 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.662661 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.662691 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.662715 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:18Z","lastTransitionTime":"2025-11-25T08:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.765799 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.765860 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.765876 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.765901 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.765918 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:18Z","lastTransitionTime":"2025-11-25T08:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.868799 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.868878 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.868900 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.868930 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.868957 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:18Z","lastTransitionTime":"2025-11-25T08:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.972840 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.972918 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.972970 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.972999 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:18 crc kubenswrapper[4932]: I1125 08:50:18.973019 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:18Z","lastTransitionTime":"2025-11-25T08:50:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.076228 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.076289 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.076306 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.076330 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.076351 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:19Z","lastTransitionTime":"2025-11-25T08:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.178265 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.178347 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.178371 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.178413 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.178436 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:19Z","lastTransitionTime":"2025-11-25T08:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.281298 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.281399 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.281426 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.281456 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.281478 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:19Z","lastTransitionTime":"2025-11-25T08:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.384774 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.384828 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.384844 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.384867 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.384884 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:19Z","lastTransitionTime":"2025-11-25T08:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.487135 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.487252 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.487277 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.487310 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.487333 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:19Z","lastTransitionTime":"2025-11-25T08:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.590185 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.590297 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.590322 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.590351 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.590378 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:19Z","lastTransitionTime":"2025-11-25T08:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.605866 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:19 crc kubenswrapper[4932]: E1125 08:50:19.606043 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.607001 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.607211 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.607168 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:19 crc kubenswrapper[4932]: E1125 08:50:19.607316 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:19 crc kubenswrapper[4932]: E1125 08:50:19.607448 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:19 crc kubenswrapper[4932]: E1125 08:50:19.607737 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.693552 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.693600 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.693621 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.693648 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.693669 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:19Z","lastTransitionTime":"2025-11-25T08:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.796522 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.796580 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.796598 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.796622 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:19 crc kubenswrapper[4932]: I1125 08:50:19.796640 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:19Z","lastTransitionTime":"2025-11-25T08:50:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.001155 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.001496 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.001641 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.001795 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.001922 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:20Z","lastTransitionTime":"2025-11-25T08:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.619378 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.619421 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.619432 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.619447 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.619457 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:20Z","lastTransitionTime":"2025-11-25T08:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.636329 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"message\\\":\\\"_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 08:49:55.527427 6606 port_cache.go:96] port-cache(openshift-network-diagnostics_network-check-target-xd92c): added port \\\\u0026{name:openshift-network-diagnostics_network-check-target-xd92c uuid:61897e97-c771-4738-8709-09636387cb00 logicalSwitch:crc ips:[0xc009e9b0b0] mac:[10 88 10 217 0 4] expires:{wall:0 ext:0 loc:\\\\u003cnil\\\\u003e}} with IP: [10.217.0.4/23] and MAC: 0a:58:0a:d9:00:04\\\\nF1125 08:49:55.527433 6606 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to ca\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s 
restarting failed container=ovnkube-controller pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.648795 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceacc
ount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.661488 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"096ad388-b4d5-4cb8-8c62-349ea9c4334d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e8f66d3dfcfc67f90083e6611e327c729d82111f820986cad673a82792d5d80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bda5bbf2bfe9fb4f5ff03e336f536c431fe2c89ae303bdb6afcb0bbb04b87641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d3939adbaf71715989609ee9a73b99541968cd43b3bf1af3cfe6a8d2e1c9b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.673847 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 
08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.708388 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.722136 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.722360 4932 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.722380 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.722388 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.722399 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.722407 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:20Z","lastTransitionTime":"2025-11-25T08:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.741585 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.751818 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.761783 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.775559 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.791430 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminat
ed\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.808757 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"
mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.824803 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.824867 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.824889 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.824919 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.824943 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:20Z","lastTransitionTime":"2025-11-25T08:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.828339 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.841918 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.854372 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.867724 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.883108 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://154dcd3feae41470aa678f3bfdfae9a5a4af769b14c800d21e37351835697115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:50:13Z\\\",\\\"message\\\":\\\"2025-11-25T08:49:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_30165ed9-25ac-4f0f-8d58-50e38c3b0aa4\\\\n2025-11-25T08:49:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_30165ed9-25ac-4f0f-8d58-50e38c3b0aa4 to /host/opt/cni/bin/\\\\n2025-11-25T08:49:28Z [verbose] multus-daemon started\\\\n2025-11-25T08:49:28Z [verbose] Readiness Indicator file check\\\\n2025-11-25T08:50:13Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:50:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.902717 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:20Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.927816 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.927856 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.927864 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.927877 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:20 crc kubenswrapper[4932]: I1125 08:50:20.927887 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:20Z","lastTransitionTime":"2025-11-25T08:50:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.030677 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.030728 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.030741 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.030759 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.030771 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:21Z","lastTransitionTime":"2025-11-25T08:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.133549 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.133587 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.133594 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.133608 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.133617 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:21Z","lastTransitionTime":"2025-11-25T08:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.236484 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.236547 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.236562 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.236580 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.236595 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:21Z","lastTransitionTime":"2025-11-25T08:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.339698 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.339767 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.339784 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.339808 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.339826 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:21Z","lastTransitionTime":"2025-11-25T08:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.442330 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.442371 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.442382 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.442398 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.442408 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:21Z","lastTransitionTime":"2025-11-25T08:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.545216 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.545255 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.545266 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.545283 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.545294 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:21Z","lastTransitionTime":"2025-11-25T08:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.605108 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.605181 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:21 crc kubenswrapper[4932]: E1125 08:50:21.605321 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.605332 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:21 crc kubenswrapper[4932]: E1125 08:50:21.605427 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.605694 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:21 crc kubenswrapper[4932]: E1125 08:50:21.606165 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:21 crc kubenswrapper[4932]: E1125 08:50:21.606282 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.606810 4932 scope.go:117] "RemoveContainer" containerID="2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.625760 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.648644 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.648678 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.648688 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.648705 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.648716 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:21Z","lastTransitionTime":"2025-11-25T08:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.751667 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.751740 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.751781 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.751816 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.751839 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:21Z","lastTransitionTime":"2025-11-25T08:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.854410 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.854724 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.854737 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.854753 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.854764 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:21Z","lastTransitionTime":"2025-11-25T08:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.957247 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.957289 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.957300 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.957318 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:21 crc kubenswrapper[4932]: I1125 08:50:21.957332 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:21Z","lastTransitionTime":"2025-11-25T08:50:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.059908 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.059962 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.059974 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.059993 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.060004 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.108665 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/2.log" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.111237 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6"} Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.111800 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.128635 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountP
ath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.143430 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"096ad388-b4d5-4cb8-8c62-349ea9c4334d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e8f66d3dfcfc67f90083e6611e327c729d82111f820986cad673a82792d5d80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bda5bbf2bfe9fb4f5ff03e336f536c431fe2c89ae303bdb6afcb0bbb04b87641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-p
od-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d3939adbaf71715989609ee9a73b99541968cd43b3bf1af3cfe6a8d2e1c9b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.162071 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.162099 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.162107 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.162119 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.162128 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.169045 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0
b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"message\\\":\\\"_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 08:49:55.527427 6606 port_cache.go:96] port-cache(openshift-network-diagnostics_network-check-target-xd92c): added port \\\\u0026{name:openshift-network-diagnostics_network-check-target-xd92c uuid:61897e97-c771-4738-8709-09636387cb00 logicalSwitch:crc ips:[0xc009e9b0b0] mac:[10 88 10 217 0 4] expires:{wall:0 ext:0 loc:\\\\u003cnil\\\\u003e}} with IP: [10.217.0.4/23] and MAC: 0a:58:0a:d9:00:04\\\\nF1125 08:49:55.527433 6606 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to 
ca\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:50:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.184100 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.194582 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.208628 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.219130 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.229910 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.242608 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.256001 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.264172 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.264229 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.264238 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.264251 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.264260 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.269781 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\
":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.281681 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.292917 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.305628 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.314892 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.326020 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://154dcd3feae41470aa678f3bfdfae9a5a4af769b14c800d21e37351835697115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:50:13Z\\\",\\\"message\\\":\\\"2025-11-25T08:49:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_30165ed9-25ac-4f0f-8d58-50e38c3b0aa4\\\\n2025-11-25T08:49:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_30165ed9-25ac-4f0f-8d58-50e38c3b0aa4 to /host/opt/cni/bin/\\\\n2025-11-25T08:49:28Z [verbose] multus-daemon started\\\\n2025-11-25T08:49:28Z [verbose] Readiness Indicator file check\\\\n2025-11-25T08:50:13Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:50:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.344472 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.358156 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74
020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.365966 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.366021 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.366033 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.366051 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.366065 4932 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.369089 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ee2b369-10a1-4cd7-b9e2-294167107d86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff2840144d631dc539ccb7c6c3a3d2a0f10890544339bed232ac76313641ffae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d40aeccf50552a790adf9f073813f84671bf1ead9588a1caef7a9f39a6008eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d40aeccf50552a790adf9f073813f84671bf1ead9588a1caef7a9f39a6008eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.468084 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.468128 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.468139 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.468159 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.468175 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.570813 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.570853 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.570866 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.570882 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.570893 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.674066 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.674139 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.674160 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.674223 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.674248 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.696146 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.696241 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.696265 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.696286 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.696299 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: E1125 08:50:22.716412 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.722077 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.722138 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.722158 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.722183 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.722292 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: E1125 08:50:22.739641 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.744465 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.744498 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.744507 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.744521 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.744532 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: E1125 08:50:22.765094 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.770623 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.770692 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.770710 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.770737 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.770757 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: E1125 08:50:22.790698 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[ ... image list elided: identical to the list in the first attempt above ... ],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.795435 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.795498 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.795516 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.795546 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.795563 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: E1125 08:50:22.810829 4932 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"62bd1c5f-ae99-478e-b19e-e49920d66581\\\",\\\"systemUUID\\\":\\\"fbb2a061-2abc-4717-831d-47e83fc0993f\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:22Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:22 crc kubenswrapper[4932]: E1125 08:50:22.811074 4932 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.812976 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.813013 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.813024 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.813042 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.813055 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.916525 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.916572 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.916603 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.916643 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:22 crc kubenswrapper[4932]: I1125 08:50:22.916665 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:22Z","lastTransitionTime":"2025-11-25T08:50:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.019431 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.019492 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.019510 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.019531 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.019546 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:23Z","lastTransitionTime":"2025-11-25T08:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.123740 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.123798 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.123831 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.123857 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.123876 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:23Z","lastTransitionTime":"2025-11-25T08:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.125861 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/3.log" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.127079 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/2.log" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.132247 4932 generic.go:334] "Generic (PLEG): container finished" podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6" exitCode=1 Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.132326 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6"} Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.132429 4932 scope.go:117] "RemoveContainer" containerID="2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.133980 4932 scope.go:117] "RemoveContainer" containerID="0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6" Nov 25 08:50:23 crc kubenswrapper[4932]: E1125 08:50:23.134337 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.158489 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.171961 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.191458 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.206049 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.222837 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.227866 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.227900 4932 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.227911 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.227926 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.227938 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:23Z","lastTransitionTime":"2025-11-25T08:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.236054 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.248125 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.270782 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.286278 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74
020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.303117 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.317455 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.329709 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.329903 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.329926 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.329946 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.329959 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:23Z","lastTransitionTime":"2025-11-25T08:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.332497 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.351905 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.365728 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.383464 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://154dcd3feae41470aa678f3bfdfae9a5a4af769b14c800d21e37351835697115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:50:13Z\\\",\\\"message\\\":\\\"2025-11-25T08:49:27+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_30165ed9-25ac-4f0f-8d58-50e38c3b0aa4\\\\n2025-11-25T08:49:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_30165ed9-25ac-4f0f-8d58-50e38c3b0aa4 to /host/opt/cni/bin/\\\\n2025-11-25T08:49:28Z [verbose] multus-daemon started\\\\n2025-11-25T08:49:28Z [verbose] Readiness Indicator file check\\\\n2025-11-25T08:50:13Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:50:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.395987 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ee2b369-10a1-4cd7-b9e2-294167107d86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff2840144d631dc539ccb7c6c3a3d2a0f10890544339bed232ac76313641ffae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d40aeccf50552a790adf9f073813f84671bf1ead9588a1caef7a9f39a6008eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d40aeccf50552a790adf9f073813f84671bf1ead9588a1caef7a9f39a6008eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.412969 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"096ad388-b4d5-4cb8-8c62-349ea9c4334d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e8f66d3dfcfc67f90083e6611e327c729d82111f820986cad673a82792d5d80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bda5bbf2bfe9fb4f5ff03e336f536c431fe2c89ae303bdb6afcb0bbb04b87641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d3939adbaf71715989609ee9a73b99541968cd43b3bf1af3cfe6a8d2e1c9b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.432154 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.432242 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.432252 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.432267 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.432275 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:23Z","lastTransitionTime":"2025-11-25T08:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.442939 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2755f436c0f4f5bf8c863bc6c981c44e140058c834c1075febfca0f1507c8997\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"message\\\":\\\"_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.246:9443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {ab0b1d51-5ec6-479b-8881-93dfa8d30337}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1125 08:49:55.527427 6606 port_cache.go:96] port-cache(openshift-network-diagnostics_network-check-target-xd92c): added port \\\\u0026{name:openshift-network-diagnostics_network-check-target-xd92c uuid:61897e97-c771-4738-8709-09636387cb00 logicalSwitch:crc ips:[0xc009e9b0b0] mac:[10 88 10 217 0 4] expires:{wall:0 ext:0 loc:\\\\u003cnil\\\\u003e}} with IP: [10.217.0.4/23] and MAC: 0a:58:0a:d9:00:04\\\\nF1125 08:49:55.527433 6606 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to 
ca\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"balancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}\\\\nI1125 08:50:22.496670 6976 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1125 08:50:22.497247 6976 services_controller.go:451] Built service openshift-marketplace/redhat-marketplace cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-marketplace_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-marketplace\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.140\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 08:50:22.497266 6976 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1125 08:50:22.497267 6976 services_controller.go:452] Built service openshift-marketplace/redhat-marketplace per-node LB for network=default: []services.LB{}\\\\nI1125 
08:50:22\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:50:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef
0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.464032 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:23Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.534858 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.534923 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.534939 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.534963 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.534980 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:23Z","lastTransitionTime":"2025-11-25T08:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.605831 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.605873 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.605975 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:23 crc kubenswrapper[4932]: E1125 08:50:23.606178 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:23 crc kubenswrapper[4932]: E1125 08:50:23.606287 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:23 crc kubenswrapper[4932]: E1125 08:50:23.606430 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.606945 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:23 crc kubenswrapper[4932]: E1125 08:50:23.607182 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.637252 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.637322 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.637346 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.637375 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.637394 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:23Z","lastTransitionTime":"2025-11-25T08:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.739626 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.739680 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.739702 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.739734 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.739755 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:23Z","lastTransitionTime":"2025-11-25T08:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.841623 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.841694 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.841715 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.841744 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.841766 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:23Z","lastTransitionTime":"2025-11-25T08:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.944861 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.944955 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.944977 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.945007 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:23 crc kubenswrapper[4932]: I1125 08:50:23.945031 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:23Z","lastTransitionTime":"2025-11-25T08:50:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.048079 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.048137 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.048147 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.048162 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.048172 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:24Z","lastTransitionTime":"2025-11-25T08:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.137783 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/3.log" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.141697 4932 scope.go:117] "RemoveContainer" containerID="0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6" Nov 25 08:50:24 crc kubenswrapper[4932]: E1125 08:50:24.141844 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.151390 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.151444 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.151458 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.151479 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.151494 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:24Z","lastTransitionTime":"2025-11-25T08:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.154725 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ee2b369-10a1-4cd7-b9e2-294167107d86\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff2840144d631dc539ccb7c6c3a3d2a0f10890544339bed232ac76313641ffae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3d40aeccf50552a790adf9f073813f84671bf1ead9588a1caef7a9f39a6008eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3d40aeccf50552a790adf9f073813f84671bf1ead9588a1caef7a9f39a6008eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.173359 4932 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"096ad388-b4d5-4cb8-8c62-349ea9c4334d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0e8f66d3dfcfc67f90083e6611e327c729d82111f820986cad673a82792d5d80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bda5bbf2bfe9fb4f5ff03e336f536c431fe2c89ae303bdb6afcb0bbb04b87641\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6d3939adbaf71715989609ee9a73b99541968cd43b3bf1af3cfe6a8d2e1c9b4b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\
"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c39050dc749f8812fdcbea158290e5c6048bf8799c618895d94c1fd7dd2acb85\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.196502 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"24f5eec6-6332-4bae-bce3-4faa1156c249\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0eb4d3f170aa77093e2e16c302e2e703c6aa2061
c8c6c6749b5ef86798ec44c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:50:22Z\\\",\\\"message\\\":\\\"balancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-image-registry/image-registry\\\\\\\"}\\\\nI1125 08:50:22.496670 6976 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI1125 08:50:22.497247 6976 services_controller.go:451] Built service openshift-marketplace/redhat-marketplace cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-marketplace_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-marketplace\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.5.140\\\\\\\", Port:50051, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1125 08:50:22.497266 6976 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI1125 08:50:22.497267 6976 services_controller.go:452] Built service openshift-marketplace/redhat-marketplace per-node LB for network=default: []services.LB{}\\\\nI1125 08:50:22\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:50:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2h2l2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:23Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-rlhks\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.209116 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d294666-e880-455e-a17f-1f878dddc477\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f5c33c63e2c3a6ea86b6c8672acbe42b5b32a57ffb62e17ac5aaad27dbda2ef8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44dcd7a7d85325e458275274ab9bb464b58270cf5e3bf53ce060bbacd78bb9ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-tbnhz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:34Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-fft4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.222861 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"343dd8b2-7428-4b00-9c0a-00f728022d6d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce9f34a24c4958f99bfd541b242f533a659714e9ae4e7702b5e700f9829996c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16ddede2a343ed8a57da71aaa5d080fba84bf2b9e4413ee21a7fa2e67aabace1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e6a3047050ddea27fd7ac04a8a3cd5f42f919d1ae7544baa8624f1b76d4a0453\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:27Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7f17693b5ee57df8d433e34381450246a21b49b5090c714e2064eea083305e8b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c10328d8eba42f8620e9c85d12bcf330eb8ad6834531b94593f2f29a5af8d6db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://895b5db96c9f0f44c1d1b65ceacc9fa9ad7061740cc08023b7c39e9490e90676\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c4c164b788537c1804c725bae3e0ef9d0ad57a60a1861edcda4893590ce3cb44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zlq2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jmvtb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.241394 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-8jl2g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1c51936d-6aa7-4dcc-b09e-9a5211e49cb3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://562632a7ef50c234b91afebd1f2f5240fdb3b17dcdc111e281dd4200b98756e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zmp5l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:25Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-8jl2g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.251577 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"58f40128-d3fc-4588-ad8f-8cf129079911\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:36Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-c5nh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:36Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-fvbqs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.253950 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.254002 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.254018 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.254042 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.254059 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:24Z","lastTransitionTime":"2025-11-25T08:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.265134 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.276582 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a65ae578374684e89abcf504454a4cdad25b611d703354ba102e94137a3004b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.295254 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd3fc7f80f891860c3a370d2f84e7bc85556e1fd68b3b49afea063f976a174b6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://dca2ed2a3d7ca857100388302c43b2f3cc3c3bfc88681a3641d68c15bf6ca3bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.308375 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc52f208-3635-4b33-a1f2-720bcff56064\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5a261a4b0bdd209aaed5886b3b1766409abe6cc295329d4dbca384c603087f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mpmnl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-plbqh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.326967 4932 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.340215 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.350828 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pb6ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"531c7937-727f-4ac5-9e26-0d7efacf93d3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b00bfe602fd427ff8673efd8370f2d0a60d2375ad9095a5bb64bc762d0d7c5a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mkqg4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:21Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pb6ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.356612 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.356668 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.356685 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.356707 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.356725 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:24Z","lastTransitionTime":"2025-11-25T08:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.364312 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-kvhb4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"199dbdf9-e2fc-459e-9e17-f5d520309f0a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:50:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://154dcd3feae41470aa678f3bfdfae9a5a4af769b14c800d21e37351835697115\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T08:50:13Z\\\",\\\"message\\\":\\\"2025-11-25T08:49:27+00:00 [cnibincopy] Successfully copied files in 
/usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_30165ed9-25ac-4f0f-8d58-50e38c3b0aa4\\\\n2025-11-25T08:49:27+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_30165ed9-25ac-4f0f-8d58-50e38c3b0aa4 to /host/opt/cni/bin/\\\\n2025-11-25T08:49:28Z [verbose] multus-daemon started\\\\n2025-11-25T08:49:28Z [verbose] Readiness Indicator file check\\\\n2025-11-25T08:50:13Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:24Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:50:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pbfbw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:22Z\\\"}}\" for pod \"openshift-multus\"/\"multus-kvhb4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.383559 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56071b8d-951c-462d-87da-1ab72df4fea7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4d661db08ec2d7fc1c6abb84ee7cd041444c34c323fb40406e74f1df8cedc382\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f66d76b8257b0564715aae8cb8980b8ce624c4f3c753bb19db7955f3b55ce08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://786e4ec83cd7c92a97157c8bd9bbd976bcab98044e036668d2f1d6144b73fc92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2aa896264e8b03e3ef12bd1e332f73733ecfec4
942709e4e87b55cf06a1b81c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87ff1dcc47c1e9a2d69e605fe99b58a6e791bcbc606aefc7eb0e79e02b59c8ca\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:05Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ca5f9d4b653828e0fc089f8482414f7c86988dc9cb67573143ea24e8f3fac8db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a9b3178c102859fc98fdc4a8062930f4c3335c8a174036e7bd1665d658cd9f4c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c21ac3f3091dd525e38b5ab79aa56b1e5d6a924e74e6dd58f7bdf7e71463434f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.399930 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f3bab695-50e7-421a-a4ff-901ab01fd6e7\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cd9e9c02db89017766295053c0327a74
020d91a84198352cecda23fe7d65f88a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T08:49:15Z\\\",\\\"message\\\":\\\"W1125 08:49:03.727843 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1125 08:49:03.728121 1 crypto.go:601] Generating new CA for check-endpoints-signer@1764060543 cert, and key in /tmp/serving-cert-1289068424/serving-signer.crt, /tmp/serving-cert-1289068424/serving-signer.key\\\\nI1125 08:49:04.891121 1 observer_polling.go:159] Starting file observer\\\\nW1125 08:49:04.893645 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1125 08:49:04.893875 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 08:49:04.894914 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1289068424/tls.crt::/tmp/serving-cert-1289068424/tls.key\\\\\\\"\\\\nF1125 08:49:15.315556 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake 
timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:03Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T08:49:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.413011 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"877a7830-a0ab-45e4-9cd4-84a2b180f10b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://86ef2c0eaa53b69252d99024e978a7beef2518dc8558408468fd5c48942d69a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://28f187028aa1273e60726aad0dbaf3e1fb058903a84559e1561f2da43d6bc0b0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://376d43f4cf67407fb50b61039e786498e6336d8b596bf1e712639d656d3a27f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T08:49:00Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.429842 4932 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T08:49:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://59fa0059682e1d02855f6a9949606363c61ba6c4eef2458510e803f776d4b9d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T08:49:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T08:50:24Z is after 2025-08-24T17:21:41Z" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.458379 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.458415 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.458424 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.458438 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.458449 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:24Z","lastTransitionTime":"2025-11-25T08:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.560627 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.560676 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.560694 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.560715 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.560732 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:24Z","lastTransitionTime":"2025-11-25T08:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.664093 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.664141 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.664154 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.664170 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.664205 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:24Z","lastTransitionTime":"2025-11-25T08:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.767785 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.767858 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.767885 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.767917 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.767939 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:24Z","lastTransitionTime":"2025-11-25T08:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.870630 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.870689 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.870711 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.870739 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.870762 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:24Z","lastTransitionTime":"2025-11-25T08:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.974351 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.974574 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.974594 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.974617 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:24 crc kubenswrapper[4932]: I1125 08:50:24.974634 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:24Z","lastTransitionTime":"2025-11-25T08:50:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.077653 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.077728 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.077752 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.077779 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.077799 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:25Z","lastTransitionTime":"2025-11-25T08:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.180652 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.180718 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.180730 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.180754 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.180766 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:25Z","lastTransitionTime":"2025-11-25T08:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.283538 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.283590 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.283599 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.283614 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.283625 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:25Z","lastTransitionTime":"2025-11-25T08:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.386564 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.386630 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.386653 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.386682 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.386706 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:25Z","lastTransitionTime":"2025-11-25T08:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.489874 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.489905 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.489914 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.489928 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.489936 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:25Z","lastTransitionTime":"2025-11-25T08:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.512485 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.512593 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.512630 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.512716 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.512677106 +0000 UTC m=+149.638706719 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.512746 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.512749 4932 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.512811 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.512851 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.512822702 +0000 UTC m=+149.638852305 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.512764 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.512933 4932 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.512948 4932 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.512993 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.512979768 +0000 UTC m=+149.639009371 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.513009 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.512893 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.513045 4932 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.513088 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.513054851 +0000 UTC m=+149.639084444 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.513101 4932 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.513155 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.513138444 +0000 UTC m=+149.639168107 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.592952 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.593076 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.593106 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.593138 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.593162 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:25Z","lastTransitionTime":"2025-11-25T08:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.606142 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.606251 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.606322 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.606141 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.606395 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.606501 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.606745 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:25 crc kubenswrapper[4932]: E1125 08:50:25.606949 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.696120 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.696163 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.696174 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.696217 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.696230 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:25Z","lastTransitionTime":"2025-11-25T08:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.799695 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.799763 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.799780 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.799805 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.799823 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:25Z","lastTransitionTime":"2025-11-25T08:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.902843 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.902923 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.902943 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.902966 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:25 crc kubenswrapper[4932]: I1125 08:50:25.902985 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:25Z","lastTransitionTime":"2025-11-25T08:50:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.005513 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.005550 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.005563 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.005579 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.005590 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:26Z","lastTransitionTime":"2025-11-25T08:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.108904 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.108959 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.108976 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.109000 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.109017 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:26Z","lastTransitionTime":"2025-11-25T08:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.211863 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.211945 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.211970 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.212226 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.212253 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:26Z","lastTransitionTime":"2025-11-25T08:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.315091 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.315147 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.315164 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.315220 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.315238 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:26Z","lastTransitionTime":"2025-11-25T08:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.417294 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.417360 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.417383 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.417413 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.417439 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:26Z","lastTransitionTime":"2025-11-25T08:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.520135 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.520253 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.520274 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.520301 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.520319 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:26Z","lastTransitionTime":"2025-11-25T08:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.622829 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.622885 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.622908 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.622931 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.622949 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:26Z","lastTransitionTime":"2025-11-25T08:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.726609 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.726671 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.726688 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.726713 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.726732 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:26Z","lastTransitionTime":"2025-11-25T08:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.830452 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.830523 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.830545 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.830568 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.830588 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:26Z","lastTransitionTime":"2025-11-25T08:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.934120 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.934511 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.934532 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.934558 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:26 crc kubenswrapper[4932]: I1125 08:50:26.934576 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:26Z","lastTransitionTime":"2025-11-25T08:50:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.038443 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.038495 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.038512 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.038540 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.038558 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:27Z","lastTransitionTime":"2025-11-25T08:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.141776 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.141856 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.141879 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.141906 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.141927 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:27Z","lastTransitionTime":"2025-11-25T08:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.244815 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.244882 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.244921 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.244953 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.244976 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:27Z","lastTransitionTime":"2025-11-25T08:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.348101 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.348236 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.348268 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.348297 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.348320 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:27Z","lastTransitionTime":"2025-11-25T08:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.451536 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.451611 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.451656 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.451680 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.451699 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:27Z","lastTransitionTime":"2025-11-25T08:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.554854 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.554916 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.554931 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.554959 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.554976 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:27Z","lastTransitionTime":"2025-11-25T08:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.605148 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.605236 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.605166 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.605323 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:27 crc kubenswrapper[4932]: E1125 08:50:27.605484 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:27 crc kubenswrapper[4932]: E1125 08:50:27.605661 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:27 crc kubenswrapper[4932]: E1125 08:50:27.606020 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:27 crc kubenswrapper[4932]: E1125 08:50:27.606252 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.657990 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.658039 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.658057 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.658079 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.658096 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:27Z","lastTransitionTime":"2025-11-25T08:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.761565 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.761633 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.761657 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.761725 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.761746 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:27Z","lastTransitionTime":"2025-11-25T08:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.864317 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.864406 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.864455 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.864479 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.864497 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:27Z","lastTransitionTime":"2025-11-25T08:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.967108 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.967176 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.967230 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.967262 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:27 crc kubenswrapper[4932]: I1125 08:50:27.967288 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:27Z","lastTransitionTime":"2025-11-25T08:50:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.070989 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.071114 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.071156 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.071235 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.071263 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:28Z","lastTransitionTime":"2025-11-25T08:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.174330 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.174400 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.174420 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.174454 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.174490 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:28Z","lastTransitionTime":"2025-11-25T08:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.277114 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.277185 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.277250 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.277273 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.277290 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:28Z","lastTransitionTime":"2025-11-25T08:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.380318 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.380392 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.380408 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.380433 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.380452 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:28Z","lastTransitionTime":"2025-11-25T08:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.482991 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.483026 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.483039 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.483054 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.483067 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:28Z","lastTransitionTime":"2025-11-25T08:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.585869 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.585927 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.585945 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.585969 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.585987 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:28Z","lastTransitionTime":"2025-11-25T08:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.688628 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.688681 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.688701 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.688781 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.688801 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:28Z","lastTransitionTime":"2025-11-25T08:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.791691 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.791758 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.791776 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.791801 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.791819 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:28Z","lastTransitionTime":"2025-11-25T08:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.894814 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.894861 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.894874 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.894892 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.894905 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:28Z","lastTransitionTime":"2025-11-25T08:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.997629 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.997696 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.997715 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.997742 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:28 crc kubenswrapper[4932]: I1125 08:50:28.997767 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:28Z","lastTransitionTime":"2025-11-25T08:50:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.099945 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.099975 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.099983 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.099996 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.100004 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:29Z","lastTransitionTime":"2025-11-25T08:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.201932 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.202000 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.202020 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.202047 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.202064 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:29Z","lastTransitionTime":"2025-11-25T08:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.304674 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.304731 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.304741 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.304756 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.304771 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:29Z","lastTransitionTime":"2025-11-25T08:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.407985 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.408064 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.408096 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.408124 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.408145 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:29Z","lastTransitionTime":"2025-11-25T08:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.510747 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.510806 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.510825 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.510850 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.510866 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:29Z","lastTransitionTime":"2025-11-25T08:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.605931 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.606054 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.606151 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.606169 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:29 crc kubenswrapper[4932]: E1125 08:50:29.606387 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:29 crc kubenswrapper[4932]: E1125 08:50:29.606527 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:29 crc kubenswrapper[4932]: E1125 08:50:29.606664 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:29 crc kubenswrapper[4932]: E1125 08:50:29.606759 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.613824 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.613899 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.613923 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.613953 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.613975 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:29Z","lastTransitionTime":"2025-11-25T08:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.717080 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.717352 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.717372 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.717398 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.717418 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:29Z","lastTransitionTime":"2025-11-25T08:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.820007 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.820697 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.820844 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.820935 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.821025 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:29Z","lastTransitionTime":"2025-11-25T08:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.923783 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.924118 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.924486 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.924646 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:29 crc kubenswrapper[4932]: I1125 08:50:29.924828 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:29Z","lastTransitionTime":"2025-11-25T08:50:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.028268 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.028333 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.028350 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.028373 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.028393 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:30Z","lastTransitionTime":"2025-11-25T08:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.131248 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.131292 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.131303 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.131318 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.131329 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:30Z","lastTransitionTime":"2025-11-25T08:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.233800 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.233952 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.233981 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.234004 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.234026 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:30Z","lastTransitionTime":"2025-11-25T08:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.336656 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.336727 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.336745 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.336768 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.336786 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:30Z","lastTransitionTime":"2025-11-25T08:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.439799 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.439864 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.439880 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.439904 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.439921 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:30Z","lastTransitionTime":"2025-11-25T08:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.542718 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.542760 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.542768 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.542782 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.542791 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:30Z","lastTransitionTime":"2025-11-25T08:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.645452 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.645528 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.645550 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.645579 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.645604 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:30Z","lastTransitionTime":"2025-11-25T08:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.652811 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=9.652745261 podStartE2EDuration="9.652745261s" podCreationTimestamp="2025-11-25 08:50:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:50:30.630875751 +0000 UTC m=+90.756905344" watchObservedRunningTime="2025-11-25 08:50:30.652745261 +0000 UTC m=+90.778774864" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.699331 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=35.699302899 podStartE2EDuration="35.699302899s" podCreationTimestamp="2025-11-25 08:49:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:50:30.652484931 +0000 UTC m=+90.778514564" watchObservedRunningTime="2025-11-25 08:50:30.699302899 +0000 UTC m=+90.825332492" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.750233 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.750377 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.752913 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.753046 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.753071 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:30Z","lastTransitionTime":"2025-11-25T08:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.771498 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-fft4x" podStartSLOduration=68.771479623 podStartE2EDuration="1m8.771479623s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:50:30.752617761 +0000 UTC m=+90.878647384" watchObservedRunningTime="2025-11-25 08:50:30.771479623 +0000 UTC m=+90.897509186" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.795288 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podStartSLOduration=68.795267128 podStartE2EDuration="1m8.795267128s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:50:30.777737517 +0000 UTC m=+90.903767110" watchObservedRunningTime="2025-11-25 08:50:30.795267128 +0000 UTC m=+90.921296701" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.795528 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-jmvtb" podStartSLOduration=68.795519207 podStartE2EDuration="1m8.795519207s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:50:30.794781799 +0000 UTC m=+90.920811412" watchObservedRunningTime="2025-11-25 08:50:30.795519207 +0000 UTC m=+90.921548780" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.808731 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-8jl2g" podStartSLOduration=68.80871328 podStartE2EDuration="1m8.80871328s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:50:30.808567764 +0000 UTC m=+90.934597357" watchObservedRunningTime="2025-11-25 08:50:30.80871328 +0000 UTC m=+90.934742843" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.856551 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.856608 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.856624 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.856648 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.856665 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:30Z","lastTransitionTime":"2025-11-25T08:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.958901 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.958927 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.958936 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.958947 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.958956 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:30Z","lastTransitionTime":"2025-11-25T08:50:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.960336 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-pb6ll" podStartSLOduration=70.9603266 podStartE2EDuration="1m10.9603266s" podCreationTimestamp="2025-11-25 08:49:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:50:30.946184331 +0000 UTC m=+91.072213894" watchObservedRunningTime="2025-11-25 08:50:30.9603266 +0000 UTC m=+91.086356163" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.960661 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-kvhb4" podStartSLOduration=68.960657213 podStartE2EDuration="1m8.960657213s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:50:30.960154833 +0000 UTC m=+91.086184406" watchObservedRunningTime="2025-11-25 08:50:30.960657213 +0000 UTC m=+91.086686776" Nov 25 08:50:30 crc kubenswrapper[4932]: I1125 08:50:30.988452 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=66.988433202 podStartE2EDuration="1m6.988433202s" podCreationTimestamp="2025-11-25 08:49:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:50:30.987512796 +0000 UTC m=+91.113542369" watchObservedRunningTime="2025-11-25 08:50:30.988433202 +0000 UTC m=+91.114462755" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.004385 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=70.004370461 podStartE2EDuration="1m10.004370461s" podCreationTimestamp="2025-11-25 08:49:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:50:31.003947695 +0000 UTC m=+91.129977258" watchObservedRunningTime="2025-11-25 08:50:31.004370461 +0000 UTC m=+91.130400024" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.016505 4932 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=69.016495252 podStartE2EDuration="1m9.016495252s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:50:31.015739113 +0000 UTC m=+91.141768676" watchObservedRunningTime="2025-11-25 08:50:31.016495252 +0000 UTC m=+91.142524815" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.061480 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.061526 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.061595 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.061612 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.061624 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:31Z","lastTransitionTime":"2025-11-25T08:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.164670 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.164712 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.164721 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.164736 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.164750 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:31Z","lastTransitionTime":"2025-11-25T08:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.268750 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.268835 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.268860 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.268888 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.268912 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:31Z","lastTransitionTime":"2025-11-25T08:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.371582 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.371623 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.371633 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.371648 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.371657 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:31Z","lastTransitionTime":"2025-11-25T08:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.474146 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.474207 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.474218 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.474231 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.474240 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:31Z","lastTransitionTime":"2025-11-25T08:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.577060 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.577113 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.577131 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.577156 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.577174 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:31Z","lastTransitionTime":"2025-11-25T08:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.605783 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.605847 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:31 crc kubenswrapper[4932]: E1125 08:50:31.605921 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:31 crc kubenswrapper[4932]: E1125 08:50:31.606046 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.606106 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:31 crc kubenswrapper[4932]: E1125 08:50:31.606167 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.606241 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:31 crc kubenswrapper[4932]: E1125 08:50:31.606294 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.680265 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.680329 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.680341 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.680363 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.680375 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:31Z","lastTransitionTime":"2025-11-25T08:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.789835 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.789893 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.789912 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.789935 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.789952 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:31Z","lastTransitionTime":"2025-11-25T08:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.893080 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.893158 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.893182 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.893249 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.893271 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:31Z","lastTransitionTime":"2025-11-25T08:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.995934 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.996002 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.996032 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.996064 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:31 crc kubenswrapper[4932]: I1125 08:50:31.996088 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:31Z","lastTransitionTime":"2025-11-25T08:50:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.099400 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.099460 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.099479 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.099505 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.099538 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:32Z","lastTransitionTime":"2025-11-25T08:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.202017 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.202074 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.202091 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.202114 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.202134 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:32Z","lastTransitionTime":"2025-11-25T08:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.305517 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.305584 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.305608 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.305636 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.305659 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:32Z","lastTransitionTime":"2025-11-25T08:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.408374 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.408450 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.408474 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.408503 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.408526 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:32Z","lastTransitionTime":"2025-11-25T08:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.511690 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.511767 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.511792 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.511822 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.511845 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:32Z","lastTransitionTime":"2025-11-25T08:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.613632 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.613689 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.613706 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.613732 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.613751 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:32Z","lastTransitionTime":"2025-11-25T08:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.716551 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.716619 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.716643 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.716672 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.716696 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:32Z","lastTransitionTime":"2025-11-25T08:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.820013 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.820059 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.820070 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.820088 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.820103 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:32Z","lastTransitionTime":"2025-11-25T08:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.922521 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.922549 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.922558 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.922571 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:32 crc kubenswrapper[4932]: I1125 08:50:32.922580 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:32Z","lastTransitionTime":"2025-11-25T08:50:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.025445 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.025477 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.025484 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.025496 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.025505 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:33Z","lastTransitionTime":"2025-11-25T08:50:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.072694 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.072740 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.072755 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.072770 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.072780 4932 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T08:50:33Z","lastTransitionTime":"2025-11-25T08:50:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.111405 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5"] Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.111720 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.113355 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.113721 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.113836 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.118002 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.193957 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/88d3af81-439f-48b7-b100-83c9796811a3-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.194052 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/88d3af81-439f-48b7-b100-83c9796811a3-service-ca\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.194087 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/88d3af81-439f-48b7-b100-83c9796811a3-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.194115 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/88d3af81-439f-48b7-b100-83c9796811a3-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.194139 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/88d3af81-439f-48b7-b100-83c9796811a3-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.295162 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/88d3af81-439f-48b7-b100-83c9796811a3-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.295322 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/88d3af81-439f-48b7-b100-83c9796811a3-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.295404 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/88d3af81-439f-48b7-b100-83c9796811a3-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.295450 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/88d3af81-439f-48b7-b100-83c9796811a3-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.295534 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/88d3af81-439f-48b7-b100-83c9796811a3-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.295541 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/88d3af81-439f-48b7-b100-83c9796811a3-service-ca\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.295584 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/88d3af81-439f-48b7-b100-83c9796811a3-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.296291 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/88d3af81-439f-48b7-b100-83c9796811a3-service-ca\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.304238 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/88d3af81-439f-48b7-b100-83c9796811a3-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.312356 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/88d3af81-439f-48b7-b100-83c9796811a3-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-ffkb5\" (UID: \"88d3af81-439f-48b7-b100-83c9796811a3\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.429986 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.605088 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.605102 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.605214 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:33 crc kubenswrapper[4932]: I1125 08:50:33.605276 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:33 crc kubenswrapper[4932]: E1125 08:50:33.605558 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:33 crc kubenswrapper[4932]: E1125 08:50:33.605863 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:33 crc kubenswrapper[4932]: E1125 08:50:33.605913 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:33 crc kubenswrapper[4932]: E1125 08:50:33.605934 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:34 crc kubenswrapper[4932]: I1125 08:50:34.178218 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" event={"ID":"88d3af81-439f-48b7-b100-83c9796811a3","Type":"ContainerStarted","Data":"8d882bfe66337e3ac4998183163f5192ca538beb98919f09d65c91ed3ec25c94"} Nov 25 08:50:34 crc kubenswrapper[4932]: I1125 08:50:34.178304 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" event={"ID":"88d3af81-439f-48b7-b100-83c9796811a3","Type":"ContainerStarted","Data":"163d97eeb2ceaa95e7ddf0daba8f56f02f26398e5e12ae2d49f15fe59113f3af"} Nov 25 08:50:34 crc kubenswrapper[4932]: I1125 08:50:34.201947 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-ffkb5" podStartSLOduration=72.201916161 podStartE2EDuration="1m12.201916161s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:50:34.200880821 +0000 UTC m=+94.326910404" watchObservedRunningTime="2025-11-25 08:50:34.201916161 +0000 UTC m=+94.327945764" Nov 25 08:50:35 crc kubenswrapper[4932]: I1125 08:50:35.605323 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:35 crc kubenswrapper[4932]: I1125 08:50:35.605388 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:35 crc kubenswrapper[4932]: E1125 08:50:35.605469 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:35 crc kubenswrapper[4932]: I1125 08:50:35.605550 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:35 crc kubenswrapper[4932]: I1125 08:50:35.605579 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:35 crc kubenswrapper[4932]: E1125 08:50:35.605723 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:35 crc kubenswrapper[4932]: E1125 08:50:35.605759 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:35 crc kubenswrapper[4932]: E1125 08:50:35.605809 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:36 crc kubenswrapper[4932]: I1125 08:50:36.609564 4932 scope.go:117] "RemoveContainer" containerID="0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6" Nov 25 08:50:36 crc kubenswrapper[4932]: E1125 08:50:36.609772 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" Nov 25 08:50:37 crc kubenswrapper[4932]: I1125 08:50:37.605352 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:37 crc kubenswrapper[4932]: I1125 08:50:37.605451 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:37 crc kubenswrapper[4932]: E1125 08:50:37.605508 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:37 crc kubenswrapper[4932]: I1125 08:50:37.605548 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:37 crc kubenswrapper[4932]: E1125 08:50:37.605660 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:37 crc kubenswrapper[4932]: I1125 08:50:37.605449 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:37 crc kubenswrapper[4932]: E1125 08:50:37.605781 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:37 crc kubenswrapper[4932]: E1125 08:50:37.605889 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:39 crc kubenswrapper[4932]: I1125 08:50:39.605012 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:39 crc kubenswrapper[4932]: I1125 08:50:39.605078 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:39 crc kubenswrapper[4932]: E1125 08:50:39.605150 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:39 crc kubenswrapper[4932]: E1125 08:50:39.605287 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:39 crc kubenswrapper[4932]: I1125 08:50:39.605357 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:39 crc kubenswrapper[4932]: E1125 08:50:39.605472 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:39 crc kubenswrapper[4932]: I1125 08:50:39.606442 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:39 crc kubenswrapper[4932]: E1125 08:50:39.606706 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:40 crc kubenswrapper[4932]: I1125 08:50:40.385283 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:40 crc kubenswrapper[4932]: E1125 08:50:40.385445 4932 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:50:40 crc kubenswrapper[4932]: E1125 08:50:40.385540 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs podName:58f40128-d3fc-4588-ad8f-8cf129079911 nodeName:}" failed. No retries permitted until 2025-11-25 08:51:44.385520156 +0000 UTC m=+164.511549709 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs") pod "network-metrics-daemon-fvbqs" (UID: "58f40128-d3fc-4588-ad8f-8cf129079911") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 08:50:41 crc kubenswrapper[4932]: I1125 08:50:41.604911 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:41 crc kubenswrapper[4932]: I1125 08:50:41.604958 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:41 crc kubenswrapper[4932]: I1125 08:50:41.604912 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:41 crc kubenswrapper[4932]: E1125 08:50:41.605090 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:41 crc kubenswrapper[4932]: E1125 08:50:41.605232 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:41 crc kubenswrapper[4932]: I1125 08:50:41.605299 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:41 crc kubenswrapper[4932]: E1125 08:50:41.605393 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:41 crc kubenswrapper[4932]: E1125 08:50:41.605539 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:43 crc kubenswrapper[4932]: I1125 08:50:43.605100 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:43 crc kubenswrapper[4932]: E1125 08:50:43.605305 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:43 crc kubenswrapper[4932]: I1125 08:50:43.605443 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:43 crc kubenswrapper[4932]: I1125 08:50:43.605589 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:43 crc kubenswrapper[4932]: E1125 08:50:43.605786 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:43 crc kubenswrapper[4932]: I1125 08:50:43.605901 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:43 crc kubenswrapper[4932]: E1125 08:50:43.605978 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:43 crc kubenswrapper[4932]: E1125 08:50:43.606235 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:45 crc kubenswrapper[4932]: I1125 08:50:45.605172 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:45 crc kubenswrapper[4932]: I1125 08:50:45.605352 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:45 crc kubenswrapper[4932]: E1125 08:50:45.605469 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:45 crc kubenswrapper[4932]: I1125 08:50:45.605534 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:45 crc kubenswrapper[4932]: E1125 08:50:45.605654 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:45 crc kubenswrapper[4932]: I1125 08:50:45.605686 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:45 crc kubenswrapper[4932]: E1125 08:50:45.605789 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:45 crc kubenswrapper[4932]: E1125 08:50:45.605883 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:47 crc kubenswrapper[4932]: I1125 08:50:47.605506 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:47 crc kubenswrapper[4932]: I1125 08:50:47.605627 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:47 crc kubenswrapper[4932]: E1125 08:50:47.605690 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:47 crc kubenswrapper[4932]: I1125 08:50:47.605522 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:47 crc kubenswrapper[4932]: I1125 08:50:47.605521 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:47 crc kubenswrapper[4932]: E1125 08:50:47.605813 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:47 crc kubenswrapper[4932]: E1125 08:50:47.605908 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:47 crc kubenswrapper[4932]: E1125 08:50:47.606027 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:49 crc kubenswrapper[4932]: I1125 08:50:49.605049 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:49 crc kubenswrapper[4932]: I1125 08:50:49.605092 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:49 crc kubenswrapper[4932]: I1125 08:50:49.605441 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:49 crc kubenswrapper[4932]: I1125 08:50:49.605462 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:49 crc kubenswrapper[4932]: E1125 08:50:49.605613 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:49 crc kubenswrapper[4932]: I1125 08:50:49.605823 4932 scope.go:117] "RemoveContainer" containerID="0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6" Nov 25 08:50:49 crc kubenswrapper[4932]: E1125 08:50:49.605913 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:49 crc kubenswrapper[4932]: E1125 08:50:49.606017 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:49 crc kubenswrapper[4932]: E1125 08:50:49.606209 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" Nov 25 08:50:49 crc kubenswrapper[4932]: E1125 08:50:49.606177 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:51 crc kubenswrapper[4932]: I1125 08:50:51.604914 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:51 crc kubenswrapper[4932]: I1125 08:50:51.605017 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:51 crc kubenswrapper[4932]: I1125 08:50:51.605038 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:51 crc kubenswrapper[4932]: I1125 08:50:51.605078 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:51 crc kubenswrapper[4932]: E1125 08:50:51.605088 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:51 crc kubenswrapper[4932]: E1125 08:50:51.605301 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:51 crc kubenswrapper[4932]: E1125 08:50:51.605534 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:51 crc kubenswrapper[4932]: E1125 08:50:51.605689 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:53 crc kubenswrapper[4932]: I1125 08:50:53.605552 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:53 crc kubenswrapper[4932]: I1125 08:50:53.605770 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:53 crc kubenswrapper[4932]: E1125 08:50:53.605881 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:53 crc kubenswrapper[4932]: E1125 08:50:53.605977 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:53 crc kubenswrapper[4932]: I1125 08:50:53.606040 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:53 crc kubenswrapper[4932]: E1125 08:50:53.606170 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:53 crc kubenswrapper[4932]: I1125 08:50:53.606048 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:53 crc kubenswrapper[4932]: E1125 08:50:53.606368 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:55 crc kubenswrapper[4932]: I1125 08:50:55.621234 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:55 crc kubenswrapper[4932]: I1125 08:50:55.621413 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:55 crc kubenswrapper[4932]: I1125 08:50:55.621758 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:55 crc kubenswrapper[4932]: I1125 08:50:55.621936 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:55 crc kubenswrapper[4932]: E1125 08:50:55.622100 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:55 crc kubenswrapper[4932]: E1125 08:50:55.621928 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:55 crc kubenswrapper[4932]: E1125 08:50:55.622293 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:55 crc kubenswrapper[4932]: E1125 08:50:55.622436 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:57 crc kubenswrapper[4932]: I1125 08:50:57.605480 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:57 crc kubenswrapper[4932]: E1125 08:50:57.605645 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:50:57 crc kubenswrapper[4932]: I1125 08:50:57.605943 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:57 crc kubenswrapper[4932]: E1125 08:50:57.606034 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:57 crc kubenswrapper[4932]: I1125 08:50:57.606253 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:57 crc kubenswrapper[4932]: E1125 08:50:57.606337 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:57 crc kubenswrapper[4932]: I1125 08:50:57.607514 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:57 crc kubenswrapper[4932]: E1125 08:50:57.607714 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:59 crc kubenswrapper[4932]: I1125 08:50:59.605592 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:50:59 crc kubenswrapper[4932]: I1125 08:50:59.605625 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:50:59 crc kubenswrapper[4932]: I1125 08:50:59.605803 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:50:59 crc kubenswrapper[4932]: I1125 08:50:59.605826 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:50:59 crc kubenswrapper[4932]: E1125 08:50:59.605873 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:50:59 crc kubenswrapper[4932]: E1125 08:50:59.606017 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:50:59 crc kubenswrapper[4932]: E1125 08:50:59.606143 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:50:59 crc kubenswrapper[4932]: E1125 08:50:59.606258 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:51:00 crc kubenswrapper[4932]: I1125 08:51:00.271817 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kvhb4_199dbdf9-e2fc-459e-9e17-f5d520309f0a/kube-multus/1.log" Nov 25 08:51:00 crc kubenswrapper[4932]: I1125 08:51:00.272895 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kvhb4_199dbdf9-e2fc-459e-9e17-f5d520309f0a/kube-multus/0.log" Nov 25 08:51:00 crc kubenswrapper[4932]: I1125 08:51:00.272972 4932 generic.go:334] "Generic (PLEG): container finished" podID="199dbdf9-e2fc-459e-9e17-f5d520309f0a" containerID="154dcd3feae41470aa678f3bfdfae9a5a4af769b14c800d21e37351835697115" exitCode=1 Nov 25 08:51:00 crc kubenswrapper[4932]: I1125 08:51:00.273031 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kvhb4" event={"ID":"199dbdf9-e2fc-459e-9e17-f5d520309f0a","Type":"ContainerDied","Data":"154dcd3feae41470aa678f3bfdfae9a5a4af769b14c800d21e37351835697115"} Nov 25 08:51:00 crc kubenswrapper[4932]: I1125 08:51:00.273116 4932 scope.go:117] "RemoveContainer" containerID="2803d9e4230b545526946da210b02c3bdd76193d7d8ac9abc97e80a78496826f" Nov 25 08:51:00 crc kubenswrapper[4932]: I1125 08:51:00.273742 4932 scope.go:117] "RemoveContainer" containerID="154dcd3feae41470aa678f3bfdfae9a5a4af769b14c800d21e37351835697115" Nov 25 08:51:00 crc kubenswrapper[4932]: E1125 08:51:00.274021 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-kvhb4_openshift-multus(199dbdf9-e2fc-459e-9e17-f5d520309f0a)\"" pod="openshift-multus/multus-kvhb4" podUID="199dbdf9-e2fc-459e-9e17-f5d520309f0a" Nov 25 08:51:00 crc kubenswrapper[4932]: I1125 08:51:00.606750 4932 scope.go:117] "RemoveContainer" containerID="0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6" Nov 25 08:51:00 crc kubenswrapper[4932]: E1125 08:51:00.606905 4932 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-rlhks_openshift-ovn-kubernetes(24f5eec6-6332-4bae-bce3-4faa1156c249)\"" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" Nov 25 08:51:00 crc kubenswrapper[4932]: E1125 08:51:00.625813 4932 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 25 08:51:00 crc kubenswrapper[4932]: E1125 08:51:00.770558 4932 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 08:51:01 crc kubenswrapper[4932]: I1125 08:51:01.279148 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kvhb4_199dbdf9-e2fc-459e-9e17-f5d520309f0a/kube-multus/1.log" Nov 25 08:51:01 crc kubenswrapper[4932]: I1125 08:51:01.605643 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:01 crc kubenswrapper[4932]: I1125 08:51:01.605731 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:51:01 crc kubenswrapper[4932]: I1125 08:51:01.605732 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:51:01 crc kubenswrapper[4932]: I1125 08:51:01.605648 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:01 crc kubenswrapper[4932]: E1125 08:51:01.605832 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:51:01 crc kubenswrapper[4932]: E1125 08:51:01.606009 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:51:01 crc kubenswrapper[4932]: E1125 08:51:01.606114 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:51:01 crc kubenswrapper[4932]: E1125 08:51:01.606383 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:51:03 crc kubenswrapper[4932]: I1125 08:51:03.605178 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:51:03 crc kubenswrapper[4932]: I1125 08:51:03.605268 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:03 crc kubenswrapper[4932]: E1125 08:51:03.605359 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:51:03 crc kubenswrapper[4932]: I1125 08:51:03.605462 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:03 crc kubenswrapper[4932]: I1125 08:51:03.605783 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:51:03 crc kubenswrapper[4932]: E1125 08:51:03.605857 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:51:03 crc kubenswrapper[4932]: E1125 08:51:03.606010 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:51:03 crc kubenswrapper[4932]: E1125 08:51:03.606144 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:51:05 crc kubenswrapper[4932]: I1125 08:51:05.604836 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:51:05 crc kubenswrapper[4932]: I1125 08:51:05.604951 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:51:05 crc kubenswrapper[4932]: E1125 08:51:05.604996 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:51:05 crc kubenswrapper[4932]: I1125 08:51:05.605035 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:05 crc kubenswrapper[4932]: I1125 08:51:05.605002 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:05 crc kubenswrapper[4932]: E1125 08:51:05.605171 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:51:05 crc kubenswrapper[4932]: E1125 08:51:05.605272 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:51:05 crc kubenswrapper[4932]: E1125 08:51:05.605327 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:51:05 crc kubenswrapper[4932]: E1125 08:51:05.771531 4932 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 08:51:07 crc kubenswrapper[4932]: I1125 08:51:07.605825 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:07 crc kubenswrapper[4932]: I1125 08:51:07.605972 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:07 crc kubenswrapper[4932]: E1125 08:51:07.606137 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:51:07 crc kubenswrapper[4932]: I1125 08:51:07.606264 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:51:07 crc kubenswrapper[4932]: I1125 08:51:07.606308 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:51:07 crc kubenswrapper[4932]: E1125 08:51:07.606407 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:51:07 crc kubenswrapper[4932]: E1125 08:51:07.606542 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:51:07 crc kubenswrapper[4932]: E1125 08:51:07.606609 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:51:09 crc kubenswrapper[4932]: I1125 08:51:09.605800 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:09 crc kubenswrapper[4932]: E1125 08:51:09.606297 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:51:09 crc kubenswrapper[4932]: I1125 08:51:09.605850 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:09 crc kubenswrapper[4932]: I1125 08:51:09.605951 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:51:09 crc kubenswrapper[4932]: E1125 08:51:09.606522 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:51:09 crc kubenswrapper[4932]: I1125 08:51:09.605922 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:51:09 crc kubenswrapper[4932]: E1125 08:51:09.606590 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:51:09 crc kubenswrapper[4932]: E1125 08:51:09.606701 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:51:10 crc kubenswrapper[4932]: E1125 08:51:10.772017 4932 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 08:51:11 crc kubenswrapper[4932]: I1125 08:51:11.605496 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:51:11 crc kubenswrapper[4932]: I1125 08:51:11.605501 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:11 crc kubenswrapper[4932]: E1125 08:51:11.605684 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:51:11 crc kubenswrapper[4932]: I1125 08:51:11.605621 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:51:11 crc kubenswrapper[4932]: E1125 08:51:11.605761 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:51:11 crc kubenswrapper[4932]: I1125 08:51:11.605548 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:11 crc kubenswrapper[4932]: E1125 08:51:11.605832 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:51:11 crc kubenswrapper[4932]: E1125 08:51:11.606026 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:51:13 crc kubenswrapper[4932]: I1125 08:51:13.605221 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:51:13 crc kubenswrapper[4932]: I1125 08:51:13.605227 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:13 crc kubenswrapper[4932]: I1125 08:51:13.605245 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:51:13 crc kubenswrapper[4932]: I1125 08:51:13.605771 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:13 crc kubenswrapper[4932]: E1125 08:51:13.605950 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:51:13 crc kubenswrapper[4932]: E1125 08:51:13.606111 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:51:13 crc kubenswrapper[4932]: E1125 08:51:13.606306 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:51:13 crc kubenswrapper[4932]: E1125 08:51:13.606429 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:51:13 crc kubenswrapper[4932]: I1125 08:51:13.606598 4932 scope.go:117] "RemoveContainer" containerID="0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6" Nov 25 08:51:14 crc kubenswrapper[4932]: I1125 08:51:14.325751 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/3.log" Nov 25 08:51:14 crc kubenswrapper[4932]: I1125 08:51:14.329401 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerStarted","Data":"5149c5d917ba8bbafe5c58c0bec0047288114ac8df493b6958cff2293f98d2b9"} Nov 25 08:51:14 crc kubenswrapper[4932]: I1125 08:51:14.329894 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 08:51:14 crc kubenswrapper[4932]: I1125 08:51:14.377772 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podStartSLOduration=112.377743497 podStartE2EDuration="1m52.377743497s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:14.37456309 +0000 UTC m=+134.500592723" watchObservedRunningTime="2025-11-25 08:51:14.377743497 +0000 UTC m=+134.503773120" Nov 25 08:51:14 crc kubenswrapper[4932]: I1125 08:51:14.512075 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-fvbqs"] Nov 25 08:51:14 crc kubenswrapper[4932]: I1125 08:51:14.512262 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:14 crc kubenswrapper[4932]: E1125 08:51:14.512443 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:51:15 crc kubenswrapper[4932]: I1125 08:51:15.605453 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:15 crc kubenswrapper[4932]: I1125 08:51:15.605515 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:51:15 crc kubenswrapper[4932]: I1125 08:51:15.605464 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:51:15 crc kubenswrapper[4932]: E1125 08:51:15.605958 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:51:15 crc kubenswrapper[4932]: E1125 08:51:15.606089 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:51:15 crc kubenswrapper[4932]: E1125 08:51:15.606261 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:51:15 crc kubenswrapper[4932]: I1125 08:51:15.606312 4932 scope.go:117] "RemoveContainer" containerID="154dcd3feae41470aa678f3bfdfae9a5a4af769b14c800d21e37351835697115" Nov 25 08:51:15 crc kubenswrapper[4932]: E1125 08:51:15.773740 4932 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 08:51:16 crc kubenswrapper[4932]: I1125 08:51:16.338560 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kvhb4_199dbdf9-e2fc-459e-9e17-f5d520309f0a/kube-multus/1.log" Nov 25 08:51:16 crc kubenswrapper[4932]: I1125 08:51:16.338640 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kvhb4" event={"ID":"199dbdf9-e2fc-459e-9e17-f5d520309f0a","Type":"ContainerStarted","Data":"f45a3bd992f34b9cbe79f81c0d4c5cd880a266d1454be0b6ac82a7d3365272b1"} Nov 25 08:51:16 crc kubenswrapper[4932]: I1125 08:51:16.605327 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:16 crc kubenswrapper[4932]: E1125 08:51:16.605562 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:51:17 crc kubenswrapper[4932]: I1125 08:51:17.605759 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:51:17 crc kubenswrapper[4932]: I1125 08:51:17.605799 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:51:17 crc kubenswrapper[4932]: I1125 08:51:17.605764 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:17 crc kubenswrapper[4932]: E1125 08:51:17.605989 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:51:17 crc kubenswrapper[4932]: E1125 08:51:17.606066 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:51:17 crc kubenswrapper[4932]: E1125 08:51:17.606183 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:51:18 crc kubenswrapper[4932]: I1125 08:51:18.605568 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:18 crc kubenswrapper[4932]: E1125 08:51:18.605848 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:51:19 crc kubenswrapper[4932]: I1125 08:51:19.605791 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:19 crc kubenswrapper[4932]: I1125 08:51:19.605898 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:51:19 crc kubenswrapper[4932]: I1125 08:51:19.605962 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:51:19 crc kubenswrapper[4932]: E1125 08:51:19.606068 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 08:51:19 crc kubenswrapper[4932]: E1125 08:51:19.606268 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 08:51:19 crc kubenswrapper[4932]: E1125 08:51:19.606437 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 08:51:20 crc kubenswrapper[4932]: I1125 08:51:20.605918 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:20 crc kubenswrapper[4932]: E1125 08:51:20.607675 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-fvbqs" podUID="58f40128-d3fc-4588-ad8f-8cf129079911" Nov 25 08:51:21 crc kubenswrapper[4932]: I1125 08:51:21.605859 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:21 crc kubenswrapper[4932]: I1125 08:51:21.605916 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:51:21 crc kubenswrapper[4932]: I1125 08:51:21.605874 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:51:21 crc kubenswrapper[4932]: I1125 08:51:21.608496 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 08:51:21 crc kubenswrapper[4932]: I1125 08:51:21.609313 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 08:51:21 crc kubenswrapper[4932]: I1125 08:51:21.609366 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 08:51:21 crc kubenswrapper[4932]: I1125 08:51:21.609374 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 08:51:22 crc kubenswrapper[4932]: I1125 08:51:22.605666 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:22 crc kubenswrapper[4932]: I1125 08:51:22.608121 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 08:51:22 crc kubenswrapper[4932]: I1125 08:51:22.608123 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.016694 4932 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.070821 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-zffp5"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.071453 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.073561 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ctx5h"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.074507 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.077159 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-2x94m"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.078149 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.078411 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.084816 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.086075 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.087109 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.093290 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.101830 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.110261 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.110990 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.111314 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.112549 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.112769 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.112881 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113248 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113332 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66f325cc-3180-4c77-afdc-7a642717d31f-serving-cert\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113386 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/157b16a5-6638-4f93-b6ae-616cadd9eb21-audit-dir\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113411 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/157b16a5-6638-4f93-b6ae-616cadd9eb21-encryption-config\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113433 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113457 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-trusted-ca-bundle\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113479 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jb7sw\" (UniqueName: \"kubernetes.io/projected/157b16a5-6638-4f93-b6ae-616cadd9eb21-kube-api-access-jb7sw\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113502 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/157b16a5-6638-4f93-b6ae-616cadd9eb21-etcd-client\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113520 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-image-import-ca\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113540 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twnjh\" (UniqueName: \"kubernetes.io/projected/66f325cc-3180-4c77-afdc-7a642717d31f-kube-api-access-twnjh\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113572 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/157b16a5-6638-4f93-b6ae-616cadd9eb21-node-pullsecrets\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113593 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-audit\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113614 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-etcd-serving-ca\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113637 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-client-ca\") pod 
\"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113658 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-config\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113678 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/157b16a5-6638-4f93-b6ae-616cadd9eb21-serving-cert\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113715 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-config\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113857 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.113904 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.114057 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.114167 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.114305 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.114440 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.114527 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.114575 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.114668 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.114775 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.115132 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.116473 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 08:51:24 crc 
kubenswrapper[4932]: I1125 08:51:24.116708 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.116737 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.116923 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.117022 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.117271 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.117476 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.117614 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-qtltp"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.117692 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.118176 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.118890 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.119380 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-qtltp" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.126691 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.127377 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.127497 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.128005 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.128636 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.130235 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-ksq5j"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.130605 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-5kf8q"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.130863 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.131105 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.134013 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.134158 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-t6qks"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.134883 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-t6qks" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.135334 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-2q4hk"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.137139 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.137578 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.137874 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.138310 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.138926 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.139785 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-jdjgs"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.140445 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.143062 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-c2l58"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.144098 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.144420 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.144638 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.144882 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.144891 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.145489 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.153328 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.154076 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.154785 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.155181 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.155258 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.155391 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.155467 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.156432 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.156502 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.156744 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.156876 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.156904 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.156961 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.156979 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.157014 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.157076 4932 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.157092 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.157114 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.157325 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.157370 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.157391 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.157408 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.157493 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.157628 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.157683 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.157963 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.158002 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.158078 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.158131 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.158160 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.158306 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.158410 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.158512 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.158611 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.158625 4932 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.158678 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.166407 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.166676 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.167073 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.167313 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.167609 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.168120 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.158142 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.170164 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.177157 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.177396 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-mgqrs"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.195039 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-bmncc"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.195305 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.195355 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.195806 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.196024 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.196258 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.200847 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.200956 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.201222 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.201335 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.202342 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.205643 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.205902 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.206012 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.206111 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.206043 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.206318 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.206363 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.206405 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.206770 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.207227 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.207966 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.212862 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ctx5h"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.213013 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.213239 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.213263 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.213428 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.213580 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.213746 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.213823 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214212 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214379 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214382 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214685 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214715 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-oauth-serving-cert\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214736 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-client-ca\") pod \"route-controller-manager-6576b87f9c-v2fzp\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214752 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0ccea157-c5a1-4e31-958e-095aa3b77b80-trusted-ca\") pod \"ingress-operator-5b745b69d9-9w9dr\" (UID: \"0ccea157-c5a1-4e31-958e-095aa3b77b80\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214766 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6d9jl\" (UniqueName: \"kubernetes.io/projected/9945a2a9-7f64-4d7c-bab3-aca70803734d-kube-api-access-6d9jl\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214781 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64w4s\" (UniqueName: \"kubernetes.io/projected/9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e-kube-api-access-64w4s\") pod \"console-operator-58897d9998-t6qks\" (UID: \"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e\") " pod="openshift-console-operator/console-operator-58897d9998-t6qks" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214796 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/7710aca4-ceb7-4162-90b5-f8adc32e49bf-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5lgl4\" (UID: \"7710aca4-ceb7-4162-90b5-f8adc32e49bf\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214810 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmp6c\" (UniqueName: \"kubernetes.io/projected/e5307db8-5382-4953-bf7e-9b9cc2b0d4c6-kube-api-access-xmp6c\") pod \"machine-approver-56656f9798-vmxsf\" (UID: \"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214826 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cntkl\" (UniqueName: \"kubernetes.io/projected/91fee7b5-f700-4555-9ced-964fa79ba338-kube-api-access-cntkl\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214841 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/aa23081e-8667-4a54-a39f-8a8073436dd9-available-featuregates\") pod \"openshift-config-operator-7777fb866f-c2l58\" (UID: \"aa23081e-8667-4a54-a39f-8a8073436dd9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214866 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-config\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214881 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bp4mx\" (UniqueName: \"kubernetes.io/projected/aa23081e-8667-4a54-a39f-8a8073436dd9-kube-api-access-bp4mx\") pod \"openshift-config-operator-7777fb866f-c2l58\" (UID: \"aa23081e-8667-4a54-a39f-8a8073436dd9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214901 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-service-ca\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214928 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-trusted-ca-bundle\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214953 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/91fee7b5-f700-4555-9ced-964fa79ba338-etcd-ca\") pod 
\"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214972 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a2c26b18-e2d1-44ff-91bf-44e7a6d1a097-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-wb24h\" (UID: \"a2c26b18-e2d1-44ff-91bf-44e7a6d1a097\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.214989 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1446a2a9-8c10-4801-a3ce-2e08d66c81b2-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wdn94\" (UID: \"1446a2a9-8c10-4801-a3ce-2e08d66c81b2\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215017 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66f325cc-3180-4c77-afdc-7a642717d31f-serving-cert\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215032 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5307db8-5382-4953-bf7e-9b9cc2b0d4c6-config\") pod \"machine-approver-56656f9798-vmxsf\" (UID: \"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215461 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-config\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215502 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rpjw\" (UniqueName: \"kubernetes.io/projected/494bd606-c814-41b7-8c9a-f89487408a08-kube-api-access-5rpjw\") pod \"openshift-apiserver-operator-796bbdcf4f-txqrb\" (UID: \"494bd606-c814-41b7-8c9a-f89487408a08\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215521 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9945a2a9-7f64-4d7c-bab3-aca70803734d-serving-cert\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215535 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/333e3fc9-dd6b-4295-9555-1f8c66440d44-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-cr7gs\" (UID: 
\"333e3fc9-dd6b-4295-9555-1f8c66440d44\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215581 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37c802fe-446b-4a2f-a17d-6db1eafb0318-serving-cert\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215607 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/157b16a5-6638-4f93-b6ae-616cadd9eb21-audit-dir\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215629 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-serving-cert\") pod \"route-controller-manager-6576b87f9c-v2fzp\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215644 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czwl2\" (UniqueName: \"kubernetes.io/projected/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-kube-api-access-czwl2\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215661 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/157b16a5-6638-4f93-b6ae-616cadd9eb21-encryption-config\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215670 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/157b16a5-6638-4f93-b6ae-616cadd9eb21-audit-dir\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215690 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215706 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e-config\") pod \"console-operator-58897d9998-t6qks\" (UID: \"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e\") " pod="openshift-console-operator/console-operator-58897d9998-t6qks" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215735 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba443197-0445-4bc8-915a-d4d6f49bdea7-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjf9j\" (UID: \"ba443197-0445-4bc8-915a-d4d6f49bdea7\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215758 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9945a2a9-7f64-4d7c-bab3-aca70803734d-etcd-client\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215772 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91fee7b5-f700-4555-9ced-964fa79ba338-config\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215787 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dm2nl\" (UniqueName: \"kubernetes.io/projected/7710aca4-ceb7-4162-90b5-f8adc32e49bf-kube-api-access-dm2nl\") pod \"cluster-image-registry-operator-dc59b4c8b-5lgl4\" (UID: \"7710aca4-ceb7-4162-90b5-f8adc32e49bf\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215803 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37c802fe-446b-4a2f-a17d-6db1eafb0318-service-ca-bundle\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215829 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcb76\" (UniqueName: \"kubernetes.io/projected/37c802fe-446b-4a2f-a17d-6db1eafb0318-kube-api-access-kcb76\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215846 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttvsx\" (UniqueName: \"kubernetes.io/projected/ba443197-0445-4bc8-915a-d4d6f49bdea7-kube-api-access-ttvsx\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjf9j\" (UID: \"ba443197-0445-4bc8-915a-d4d6f49bdea7\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215862 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/333e3fc9-dd6b-4295-9555-1f8c66440d44-config\") pod \"kube-controller-manager-operator-78b949d7b-cr7gs\" (UID: \"333e3fc9-dd6b-4295-9555-1f8c66440d44\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215877 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhhb8\" (UniqueName: \"kubernetes.io/projected/a2c26b18-e2d1-44ff-91bf-44e7a6d1a097-kube-api-access-rhhb8\") pod \"cluster-samples-operator-665b6dd947-wb24h\" (UID: \"a2c26b18-e2d1-44ff-91bf-44e7a6d1a097\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.215891 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e5307db8-5382-4953-bf7e-9b9cc2b0d4c6-auth-proxy-config\") pod \"machine-approver-56656f9798-vmxsf\" (UID: \"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.216719 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.216749 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-trusted-ca-bundle\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.216868 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jb7sw\" (UniqueName: \"kubernetes.io/projected/157b16a5-6638-4f93-b6ae-616cadd9eb21-kube-api-access-jb7sw\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.216932 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/157b16a5-6638-4f93-b6ae-616cadd9eb21-etcd-client\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.216968 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-image-import-ca\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217002 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twnjh\" (UniqueName: \"kubernetes.io/projected/66f325cc-3180-4c77-afdc-7a642717d31f-kube-api-access-twnjh\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217064 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9cm2\" (UniqueName: \"kubernetes.io/projected/868f657f-d9b7-43c8-a706-a7657f16ce42-kube-api-access-l9cm2\") pod \"machine-api-operator-5694c8668f-2x94m\" (UID: \"868f657f-d9b7-43c8-a706-a7657f16ce42\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217105 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217147 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217209 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217242 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e-trusted-ca\") pod \"console-operator-58897d9998-t6qks\" (UID: \"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e\") " pod="openshift-console-operator/console-operator-58897d9998-t6qks" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217272 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37c802fe-446b-4a2f-a17d-6db1eafb0318-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217309 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/868f657f-d9b7-43c8-a706-a7657f16ce42-images\") pod \"machine-api-operator-5694c8668f-2x94m\" (UID: \"868f657f-d9b7-43c8-a706-a7657f16ce42\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217338 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba443197-0445-4bc8-915a-d4d6f49bdea7-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjf9j\" (UID: \"ba443197-0445-4bc8-915a-d4d6f49bdea7\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217368 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37c802fe-446b-4a2f-a17d-6db1eafb0318-config\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217398 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa23081e-8667-4a54-a39f-8a8073436dd9-serving-cert\") pod \"openshift-config-operator-7777fb866f-c2l58\" (UID: \"aa23081e-8667-4a54-a39f-8a8073436dd9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217428 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-oauth-config\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217467 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7710aca4-ceb7-4162-90b5-f8adc32e49bf-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5lgl4\" (UID: \"7710aca4-ceb7-4162-90b5-f8adc32e49bf\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217493 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0ccea157-c5a1-4e31-958e-095aa3b77b80-metrics-tls\") pod \"ingress-operator-5b745b69d9-9w9dr\" (UID: \"0ccea157-c5a1-4e31-958e-095aa3b77b80\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217513 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217534 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1446a2a9-8c10-4801-a3ce-2e08d66c81b2-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wdn94\" (UID: \"1446a2a9-8c10-4801-a3ce-2e08d66c81b2\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217560 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9945a2a9-7f64-4d7c-bab3-aca70803734d-audit-dir\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217579 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-config\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217600 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-audit-policies\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217637 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/157b16a5-6638-4f93-b6ae-616cadd9eb21-node-pullsecrets\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217670 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217692 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-serving-cert\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217715 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0ccea157-c5a1-4e31-958e-095aa3b77b80-bound-sa-token\") pod \"ingress-operator-5b745b69d9-9w9dr\" (UID: \"0ccea157-c5a1-4e31-958e-095aa3b77b80\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217740 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/868f657f-d9b7-43c8-a706-a7657f16ce42-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-2x94m\" (UID: \"868f657f-d9b7-43c8-a706-a7657f16ce42\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217808 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc 
kubenswrapper[4932]: I1125 08:51:24.217847 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/333e3fc9-dd6b-4295-9555-1f8c66440d44-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-cr7gs\" (UID: \"333e3fc9-dd6b-4295-9555-1f8c66440d44\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217887 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-audit\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217919 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-etcd-serving-ca\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.217997 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218049 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khl2w\" (UniqueName: \"kubernetes.io/projected/21fffc77-e724-4f48-ac20-f21104224241-kube-api-access-khl2w\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218082 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9945a2a9-7f64-4d7c-bab3-aca70803734d-encryption-config\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218121 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91fee7b5-f700-4555-9ced-964fa79ba338-serving-cert\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218160 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/91fee7b5-f700-4555-9ced-964fa79ba338-etcd-client\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218208 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/868f657f-d9b7-43c8-a706-a7657f16ce42-config\") pod \"machine-api-operator-5694c8668f-2x94m\" (UID: \"868f657f-d9b7-43c8-a706-a7657f16ce42\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218249 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-client-ca\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218289 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/21fffc77-e724-4f48-ac20-f21104224241-audit-dir\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218328 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9945a2a9-7f64-4d7c-bab3-aca70803734d-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218350 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-image-import-ca\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218365 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218422 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218462 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e-serving-cert\") pod \"console-operator-58897d9998-t6qks\" (UID: \"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e\") " pod="openshift-console-operator/console-operator-58897d9998-t6qks" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218502 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/91fee7b5-f700-4555-9ced-964fa79ba338-etcd-service-ca\") 
pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.218701 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/157b16a5-6638-4f93-b6ae-616cadd9eb21-node-pullsecrets\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.219230 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-etcd-serving-ca\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.219317 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-audit\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.219907 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.220108 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-config\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.220677 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/157b16a5-6638-4f93-b6ae-616cadd9eb21-trusted-ca-bundle\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.223892 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-client-ca\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.223950 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9m8zz\" (UniqueName: \"kubernetes.io/projected/1688eab6-98cb-4e8e-97c5-f14a2fa0db76-kube-api-access-9m8zz\") pod \"downloads-7954f5f757-qtltp\" (UID: \"1688eab6-98cb-4e8e-97c5-f14a2fa0db76\") " pod="openshift-console/downloads-7954f5f757-qtltp" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.237897 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-config\") pod \"route-controller-manager-6576b87f9c-v2fzp\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.237927 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9rfz\" (UniqueName: \"kubernetes.io/projected/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-kube-api-access-r9rfz\") pod \"route-controller-manager-6576b87f9c-v2fzp\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.237947 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.237989 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/494bd606-c814-41b7-8c9a-f89487408a08-config\") pod \"openshift-apiserver-operator-796bbdcf4f-txqrb\" (UID: \"494bd606-c814-41b7-8c9a-f89487408a08\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.238026 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/157b16a5-6638-4f93-b6ae-616cadd9eb21-serving-cert\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.238047 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/7710aca4-ceb7-4162-90b5-f8adc32e49bf-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5lgl4\" (UID: \"7710aca4-ceb7-4162-90b5-f8adc32e49bf\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.238066 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6trv6\" (UniqueName: \"kubernetes.io/projected/0ccea157-c5a1-4e31-958e-095aa3b77b80-kube-api-access-6trv6\") pod \"ingress-operator-5b745b69d9-9w9dr\" (UID: \"0ccea157-c5a1-4e31-958e-095aa3b77b80\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.238083 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/e5307db8-5382-4953-bf7e-9b9cc2b0d4c6-machine-approver-tls\") pod \"machine-approver-56656f9798-vmxsf\" (UID: \"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.238104 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9945a2a9-7f64-4d7c-bab3-aca70803734d-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: 
\"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.238119 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1446a2a9-8c10-4801-a3ce-2e08d66c81b2-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wdn94\" (UID: \"1446a2a9-8c10-4801-a3ce-2e08d66c81b2\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.238135 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/494bd606-c814-41b7-8c9a-f89487408a08-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-txqrb\" (UID: \"494bd606-c814-41b7-8c9a-f89487408a08\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.224168 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.240258 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.240285 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-42gdf"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.253923 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9945a2a9-7f64-4d7c-bab3-aca70803734d-audit-policies\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.224487 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-config\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.224833 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.230538 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/157b16a5-6638-4f93-b6ae-616cadd9eb21-encryption-config\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.233421 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/157b16a5-6638-4f93-b6ae-616cadd9eb21-etcd-client\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.258493 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-sbxc8"] Nov 25 
08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.230959 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66f325cc-3180-4c77-afdc-7a642717d31f-serving-cert\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.224938 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.226679 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.226730 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.226993 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.259156 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.227389 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.229599 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.235120 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.259839 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-42gdf" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.259935 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.260228 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-sbxc8" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.260980 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-k6wpr"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.261284 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.262485 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-2x94m"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.262519 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.262740 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k6wpr" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.263687 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.264399 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.264423 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-t9z5r"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.265303 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.266104 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.268855 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.270090 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.271341 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/157b16a5-6638-4f93-b6ae-616cadd9eb21-serving-cert\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.272813 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.274489 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.275323 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.276075 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ktv2t"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.276668 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.276723 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.277209 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.278435 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.278964 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.279701 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.281441 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-zvnwp"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.282724 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-zvnwp" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.284337 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-qtltp"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.285477 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5kf8q"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.287465 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-2q4hk"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.289382 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-zffp5"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.293564 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.293979 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.295342 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.300704 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-ksq5j"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.303532 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.307483 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.308671 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-c2l58"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.309658 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-42gdf"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.310659 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.311586 4932 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.312509 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.313463 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.314441 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.314478 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.315340 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-t6qks"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.317745 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.318337 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.319293 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-t9z5r"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.320245 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-jdjgs"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.321412 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-k6wpr"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.322325 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.323238 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-mgqrs"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.324223 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.325228 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.326132 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-cxlvv"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.327523 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-sdbr8"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.327666 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.328956 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.330590 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-sbxc8"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.330612 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.331727 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.333628 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.339081 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.339486 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.340912 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-zvnwp"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.344993 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.346580 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ktv2t"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.348946 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-cxlvv"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.351205 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-sdbr8"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.352255 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-mlf5b"] Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.352788 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-mlf5b" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354068 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354548 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/333e3fc9-dd6b-4295-9555-1f8c66440d44-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-cr7gs\" (UID: \"333e3fc9-dd6b-4295-9555-1f8c66440d44\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354571 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5d011f07-46e6-4102-bec8-022da84881ac-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-d8tft\" (UID: \"5d011f07-46e6-4102-bec8-022da84881ac\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354591 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9945a2a9-7f64-4d7c-bab3-aca70803734d-serving-cert\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354607 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba443197-0445-4bc8-915a-d4d6f49bdea7-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjf9j\" (UID: \"ba443197-0445-4bc8-915a-d4d6f49bdea7\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354622 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9945a2a9-7f64-4d7c-bab3-aca70803734d-etcd-client\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354661 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57msn\" (UniqueName: \"kubernetes.io/projected/62c5310d-d281-4096-b5ae-fbdd368daa44-kube-api-access-57msn\") pod \"dns-operator-744455d44c-42gdf\" (UID: \"62c5310d-d281-4096-b5ae-fbdd368daa44\") " pod="openshift-dns-operator/dns-operator-744455d44c-42gdf" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354682 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37c802fe-446b-4a2f-a17d-6db1eafb0318-service-ca-bundle\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354704 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj6h7\" (UniqueName: 
\"kubernetes.io/projected/a30b3fac-e050-452c-8806-2120c8a6fe6b-kube-api-access-tj6h7\") pod \"control-plane-machine-set-operator-78cbb6b69f-4dn4j\" (UID: \"a30b3fac-e050-452c-8806-2120c8a6fe6b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354741 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354757 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37c802fe-446b-4a2f-a17d-6db1eafb0318-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354773 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/868f657f-d9b7-43c8-a706-a7657f16ce42-images\") pod \"machine-api-operator-5694c8668f-2x94m\" (UID: \"868f657f-d9b7-43c8-a706-a7657f16ce42\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354787 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba443197-0445-4bc8-915a-d4d6f49bdea7-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjf9j\" (UID: \"ba443197-0445-4bc8-915a-d4d6f49bdea7\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354820 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0ccea157-c5a1-4e31-958e-095aa3b77b80-metrics-tls\") pod \"ingress-operator-5b745b69d9-9w9dr\" (UID: \"0ccea157-c5a1-4e31-958e-095aa3b77b80\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354836 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-oauth-config\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354852 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1446a2a9-8c10-4801-a3ce-2e08d66c81b2-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wdn94\" (UID: \"1446a2a9-8c10-4801-a3ce-2e08d66c81b2\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354866 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-config\") 
pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354909 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a30b3fac-e050-452c-8806-2120c8a6fe6b-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4dn4j\" (UID: \"a30b3fac-e050-452c-8806-2120c8a6fe6b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354926 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354941 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-serving-cert\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354958 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0ccea157-c5a1-4e31-958e-095aa3b77b80-bound-sa-token\") pod \"ingress-operator-5b745b69d9-9w9dr\" (UID: \"0ccea157-c5a1-4e31-958e-095aa3b77b80\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354972 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/868f657f-d9b7-43c8-a706-a7657f16ce42-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-2x94m\" (UID: \"868f657f-d9b7-43c8-a706-a7657f16ce42\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.354990 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355006 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b97ce800-9e29-48e1-8047-83e363a75a16-srv-cert\") pod \"catalog-operator-68c6474976-c49xs\" (UID: \"b97ce800-9e29-48e1-8047-83e363a75a16\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355022 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/91fee7b5-f700-4555-9ced-964fa79ba338-etcd-client\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355037 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-config-volume\") pod \"collect-profiles-29401005-fjmdj\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355053 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355069 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355084 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qf2rf\" (UniqueName: \"kubernetes.io/projected/5d011f07-46e6-4102-bec8-022da84881ac-kube-api-access-qf2rf\") pod \"machine-config-controller-84d6567774-d8tft\" (UID: \"5d011f07-46e6-4102-bec8-022da84881ac\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355100 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/91fee7b5-f700-4555-9ced-964fa79ba338-etcd-service-ca\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355116 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-config\") pod \"route-controller-manager-6576b87f9c-v2fzp\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355131 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9rfz\" (UniqueName: \"kubernetes.io/projected/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-kube-api-access-r9rfz\") pod \"route-controller-manager-6576b87f9c-v2fzp\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355146 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355163 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6trv6\" (UniqueName: \"kubernetes.io/projected/0ccea157-c5a1-4e31-958e-095aa3b77b80-kube-api-access-6trv6\") pod \"ingress-operator-5b745b69d9-9w9dr\" (UID: \"0ccea157-c5a1-4e31-958e-095aa3b77b80\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355178 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/e5307db8-5382-4953-bf7e-9b9cc2b0d4c6-machine-approver-tls\") pod \"machine-approver-56656f9798-vmxsf\" (UID: \"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355215 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/494bd606-c814-41b7-8c9a-f89487408a08-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-txqrb\" (UID: \"494bd606-c814-41b7-8c9a-f89487408a08\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355229 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9945a2a9-7f64-4d7c-bab3-aca70803734d-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355246 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355263 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-oauth-serving-cert\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355278 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-client-ca\") pod \"route-controller-manager-6576b87f9c-v2fzp\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355291 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0ccea157-c5a1-4e31-958e-095aa3b77b80-trusted-ca\") pod \"ingress-operator-5b745b69d9-9w9dr\" (UID: \"0ccea157-c5a1-4e31-958e-095aa3b77b80\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355306 4932 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-6d9jl\" (UniqueName: \"kubernetes.io/projected/9945a2a9-7f64-4d7c-bab3-aca70803734d-kube-api-access-6d9jl\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355321 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b500c768-0c0e-4a28-8809-5e181e03bc5c-signing-cabundle\") pod \"service-ca-9c57cc56f-t9z5r\" (UID: \"b500c768-0c0e-4a28-8809-5e181e03bc5c\") " pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355342 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cntkl\" (UniqueName: \"kubernetes.io/projected/91fee7b5-f700-4555-9ced-964fa79ba338-kube-api-access-cntkl\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355358 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/aa23081e-8667-4a54-a39f-8a8073436dd9-available-featuregates\") pod \"openshift-config-operator-7777fb866f-c2l58\" (UID: \"aa23081e-8667-4a54-a39f-8a8073436dd9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355373 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/91fee7b5-f700-4555-9ced-964fa79ba338-etcd-ca\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355388 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-service-ca\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355402 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-trusted-ca-bundle\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355417 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a2c26b18-e2d1-44ff-91bf-44e7a6d1a097-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-wb24h\" (UID: \"a2c26b18-e2d1-44ff-91bf-44e7a6d1a097\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355434 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5307db8-5382-4953-bf7e-9b9cc2b0d4c6-config\") pod \"machine-approver-56656f9798-vmxsf\" (UID: \"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355466 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18c360f0-89d1-46d9-ad99-40cc04b88546-config\") pod \"service-ca-operator-777779d784-9lhfl\" (UID: \"18c360f0-89d1-46d9-ad99-40cc04b88546\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355483 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rpjw\" (UniqueName: \"kubernetes.io/projected/494bd606-c814-41b7-8c9a-f89487408a08-kube-api-access-5rpjw\") pod \"openshift-apiserver-operator-796bbdcf4f-txqrb\" (UID: \"494bd606-c814-41b7-8c9a-f89487408a08\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355505 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37c802fe-446b-4a2f-a17d-6db1eafb0318-serving-cert\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355521 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/62c5310d-d281-4096-b5ae-fbdd368daa44-metrics-tls\") pod \"dns-operator-744455d44c-42gdf\" (UID: \"62c5310d-d281-4096-b5ae-fbdd368daa44\") " pod="openshift-dns-operator/dns-operator-744455d44c-42gdf" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355537 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmx5f\" (UniqueName: \"kubernetes.io/projected/18c360f0-89d1-46d9-ad99-40cc04b88546-kube-api-access-rmx5f\") pod \"service-ca-operator-777779d784-9lhfl\" (UID: \"18c360f0-89d1-46d9-ad99-40cc04b88546\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355553 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-serving-cert\") pod \"route-controller-manager-6576b87f9c-v2fzp\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355558 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba443197-0445-4bc8-915a-d4d6f49bdea7-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjf9j\" (UID: \"ba443197-0445-4bc8-915a-d4d6f49bdea7\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j" Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355570 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czwl2\" (UniqueName: \"kubernetes.io/projected/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-kube-api-access-czwl2\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 
08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355585 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e-config\") pod \"console-operator-58897d9998-t6qks\" (UID: \"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e\") " pod="openshift-console-operator/console-operator-58897d9998-t6qks"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355602 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91fee7b5-f700-4555-9ced-964fa79ba338-config\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355620 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcb76\" (UniqueName: \"kubernetes.io/projected/37c802fe-446b-4a2f-a17d-6db1eafb0318-kube-api-access-kcb76\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355636 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttvsx\" (UniqueName: \"kubernetes.io/projected/ba443197-0445-4bc8-915a-d4d6f49bdea7-kube-api-access-ttvsx\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjf9j\" (UID: \"ba443197-0445-4bc8-915a-d4d6f49bdea7\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355651 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dm2nl\" (UniqueName: \"kubernetes.io/projected/7710aca4-ceb7-4162-90b5-f8adc32e49bf-kube-api-access-dm2nl\") pod \"cluster-image-registry-operator-dc59b4c8b-5lgl4\" (UID: \"7710aca4-ceb7-4162-90b5-f8adc32e49bf\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355667 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/333e3fc9-dd6b-4295-9555-1f8c66440d44-config\") pod \"kube-controller-manager-operator-78b949d7b-cr7gs\" (UID: \"333e3fc9-dd6b-4295-9555-1f8c66440d44\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355688 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhhb8\" (UniqueName: \"kubernetes.io/projected/a2c26b18-e2d1-44ff-91bf-44e7a6d1a097-kube-api-access-rhhb8\") pod \"cluster-samples-operator-665b6dd947-wb24h\" (UID: \"a2c26b18-e2d1-44ff-91bf-44e7a6d1a097\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355703 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e5307db8-5382-4953-bf7e-9b9cc2b0d4c6-auth-proxy-config\") pod \"machine-approver-56656f9798-vmxsf\" (UID: \"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355719 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9cm2\" (UniqueName: \"kubernetes.io/projected/868f657f-d9b7-43c8-a706-a7657f16ce42-kube-api-access-l9cm2\") pod \"machine-api-operator-5694c8668f-2x94m\" (UID: \"868f657f-d9b7-43c8-a706-a7657f16ce42\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355734 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355753 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355768 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e-trusted-ca\") pod \"console-operator-58897d9998-t6qks\" (UID: \"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e\") " pod="openshift-console-operator/console-operator-58897d9998-t6qks"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355783 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/18c360f0-89d1-46d9-ad99-40cc04b88546-serving-cert\") pod \"service-ca-operator-777779d784-9lhfl\" (UID: \"18c360f0-89d1-46d9-ad99-40cc04b88546\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355799 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7710aca4-ceb7-4162-90b5-f8adc32e49bf-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5lgl4\" (UID: \"7710aca4-ceb7-4162-90b5-f8adc32e49bf\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355815 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37c802fe-446b-4a2f-a17d-6db1eafb0318-config\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355830 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa23081e-8667-4a54-a39f-8a8073436dd9-serving-cert\") pod \"openshift-config-operator-7777fb866f-c2l58\" (UID: \"aa23081e-8667-4a54-a39f-8a8073436dd9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355849 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355865 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9945a2a9-7f64-4d7c-bab3-aca70803734d-audit-dir\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355881 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/831022da-9b12-412f-b477-9c592428fe60-images\") pod \"machine-config-operator-74547568cd-pbnhb\" (UID: \"831022da-9b12-412f-b477-9c592428fe60\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355896 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-audit-policies\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355911 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5d011f07-46e6-4102-bec8-022da84881ac-proxy-tls\") pod \"machine-config-controller-84d6567774-d8tft\" (UID: \"5d011f07-46e6-4102-bec8-022da84881ac\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355916 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37c802fe-446b-4a2f-a17d-6db1eafb0318-service-ca-bundle\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.356932 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37c802fe-446b-4a2f-a17d-6db1eafb0318-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.357203 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.357629 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-service-ca\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.357915 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-config\") pod \"route-controller-manager-6576b87f9c-v2fzp\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.358356 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/868f657f-d9b7-43c8-a706-a7657f16ce42-images\") pod \"machine-api-operator-5694c8668f-2x94m\" (UID: \"868f657f-d9b7-43c8-a706-a7657f16ce42\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.358467 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-trusted-ca-bundle\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.358587 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba443197-0445-4bc8-915a-d4d6f49bdea7-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjf9j\" (UID: \"ba443197-0445-4bc8-915a-d4d6f49bdea7\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.358767 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e5307db8-5382-4953-bf7e-9b9cc2b0d4c6-auth-proxy-config\") pod \"machine-approver-56656f9798-vmxsf\" (UID: \"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.359212 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.359203 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-oauth-config\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.359740 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5307db8-5382-4953-bf7e-9b9cc2b0d4c6-config\") pod \"machine-approver-56656f9798-vmxsf\" (UID: \"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.359976 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-config\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.360248 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.360416 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/91fee7b5-f700-4555-9ced-964fa79ba338-config\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.360705 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.361113 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e-config\") pod \"console-operator-58897d9998-t6qks\" (UID: \"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e\") " pod="openshift-console-operator/console-operator-58897d9998-t6qks"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.361135 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-serving-cert\") pod \"route-controller-manager-6576b87f9c-v2fzp\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.361147 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a2c26b18-e2d1-44ff-91bf-44e7a6d1a097-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-wb24h\" (UID: \"a2c26b18-e2d1-44ff-91bf-44e7a6d1a097\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.361590 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.361679 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/91fee7b5-f700-4555-9ced-964fa79ba338-etcd-client\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.361844 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.361911 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/9945a2a9-7f64-4d7c-bab3-aca70803734d-audit-dir\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.362520 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e-trusted-ca\") pod \"console-operator-58897d9998-t6qks\" (UID: \"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e\") " pod="openshift-console-operator/console-operator-58897d9998-t6qks"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.362591 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9945a2a9-7f64-4d7c-bab3-aca70803734d-serving-cert\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.363016 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/e5307db8-5382-4953-bf7e-9b9cc2b0d4c6-machine-approver-tls\") pod \"machine-approver-56656f9798-vmxsf\" (UID: \"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.363176 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/91fee7b5-f700-4555-9ced-964fa79ba338-etcd-service-ca\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.363323 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7710aca4-ceb7-4162-90b5-f8adc32e49bf-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-5lgl4\" (UID: \"7710aca4-ceb7-4162-90b5-f8adc32e49bf\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.363773 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37c802fe-446b-4a2f-a17d-6db1eafb0318-config\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.363805 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-audit-policies\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.363971 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-serving-cert\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.363981 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/9945a2a9-7f64-4d7c-bab3-aca70803734d-etcd-client\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364378 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-oauth-serving-cert\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364466 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/9945a2a9-7f64-4d7c-bab3-aca70803734d-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.355928 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364555 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/333e3fc9-dd6b-4295-9555-1f8c66440d44-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-cr7gs\" (UID: \"333e3fc9-dd6b-4295-9555-1f8c66440d44\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364575 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khl2w\" (UniqueName: \"kubernetes.io/projected/21fffc77-e724-4f48-ac20-f21104224241-kube-api-access-khl2w\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364592 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9945a2a9-7f64-4d7c-bab3-aca70803734d-encryption-config\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364614 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/831022da-9b12-412f-b477-9c592428fe60-auth-proxy-config\") pod \"machine-config-operator-74547568cd-pbnhb\" (UID: \"831022da-9b12-412f-b477-9c592428fe60\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364633 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/868f657f-d9b7-43c8-a706-a7657f16ce42-config\") pod \"machine-api-operator-5694c8668f-2x94m\" (UID: \"868f657f-d9b7-43c8-a706-a7657f16ce42\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364650 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91fee7b5-f700-4555-9ced-964fa79ba338-serving-cert\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364666 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8p2j\" (UniqueName: \"kubernetes.io/projected/831022da-9b12-412f-b477-9c592428fe60-kube-api-access-j8p2j\") pod \"machine-config-operator-74547568cd-pbnhb\" (UID: \"831022da-9b12-412f-b477-9c592428fe60\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364683 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-secret-volume\") pod \"collect-profiles-29401005-fjmdj\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364700 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9945a2a9-7f64-4d7c-bab3-aca70803734d-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364715 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b97ce800-9e29-48e1-8047-83e363a75a16-profile-collector-cert\") pod \"catalog-operator-68c6474976-c49xs\" (UID: \"b97ce800-9e29-48e1-8047-83e363a75a16\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364735 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/21fffc77-e724-4f48-ac20-f21104224241-audit-dir\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364752 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e-serving-cert\") pod \"console-operator-58897d9998-t6qks\" (UID: \"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e\") " pod="openshift-console-operator/console-operator-58897d9998-t6qks"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364774 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9m8zz\" (UniqueName: \"kubernetes.io/projected/1688eab6-98cb-4e8e-97c5-f14a2fa0db76-kube-api-access-9m8zz\") pod \"downloads-7954f5f757-qtltp\" (UID: \"1688eab6-98cb-4e8e-97c5-f14a2fa0db76\") " pod="openshift-console/downloads-7954f5f757-qtltp"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364791 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/aa23081e-8667-4a54-a39f-8a8073436dd9-available-featuregates\") pod \"openshift-config-operator-7777fb866f-c2l58\" (UID: \"aa23081e-8667-4a54-a39f-8a8073436dd9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364802 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/494bd606-c814-41b7-8c9a-f89487408a08-config\") pod \"openshift-apiserver-operator-796bbdcf4f-txqrb\" (UID: \"494bd606-c814-41b7-8c9a-f89487408a08\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364853 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/7710aca4-ceb7-4162-90b5-f8adc32e49bf-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5lgl4\" (UID: \"7710aca4-ceb7-4162-90b5-f8adc32e49bf\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364889 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9945a2a9-7f64-4d7c-bab3-aca70803734d-audit-policies\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364911 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1446a2a9-8c10-4801-a3ce-2e08d66c81b2-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wdn94\" (UID: \"1446a2a9-8c10-4801-a3ce-2e08d66c81b2\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364923 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-client-ca\") pod \"route-controller-manager-6576b87f9c-v2fzp\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364932 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/831022da-9b12-412f-b477-9c592428fe60-proxy-tls\") pod \"machine-config-operator-74547568cd-pbnhb\" (UID: \"831022da-9b12-412f-b477-9c592428fe60\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364951 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hld9k\" (UniqueName: \"kubernetes.io/projected/b500c768-0c0e-4a28-8809-5e181e03bc5c-kube-api-access-hld9k\") pod \"service-ca-9c57cc56f-t9z5r\" (UID: \"b500c768-0c0e-4a28-8809-5e181e03bc5c\") " pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364957 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364975 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7710aca4-ceb7-4162-90b5-f8adc32e49bf-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5lgl4\" (UID: \"7710aca4-ceb7-4162-90b5-f8adc32e49bf\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.364993 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64w4s\" (UniqueName: \"kubernetes.io/projected/9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e-kube-api-access-64w4s\") pod \"console-operator-58897d9998-t6qks\" (UID: \"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e\") " pod="openshift-console-operator/console-operator-58897d9998-t6qks"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.365264 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/494bd606-c814-41b7-8c9a-f89487408a08-config\") pod \"openshift-apiserver-operator-796bbdcf4f-txqrb\" (UID: \"494bd606-c814-41b7-8c9a-f89487408a08\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.365320 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vm7f2\" (UniqueName: \"kubernetes.io/projected/b97ce800-9e29-48e1-8047-83e363a75a16-kube-api-access-vm7f2\") pod \"catalog-operator-68c6474976-c49xs\" (UID: \"b97ce800-9e29-48e1-8047-83e363a75a16\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.365340 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b500c768-0c0e-4a28-8809-5e181e03bc5c-signing-key\") pod \"service-ca-9c57cc56f-t9z5r\" (UID: \"b500c768-0c0e-4a28-8809-5e181e03bc5c\") " pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.365359 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmp6c\" (UniqueName: \"kubernetes.io/projected/e5307db8-5382-4953-bf7e-9b9cc2b0d4c6-kube-api-access-xmp6c\") pod \"machine-approver-56656f9798-vmxsf\" (UID: \"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.365376 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bp4mx\" (UniqueName: \"kubernetes.io/projected/aa23081e-8667-4a54-a39f-8a8073436dd9-kube-api-access-bp4mx\") pod \"openshift-config-operator-7777fb866f-c2l58\" (UID: \"aa23081e-8667-4a54-a39f-8a8073436dd9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.365392 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1446a2a9-8c10-4801-a3ce-2e08d66c81b2-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wdn94\" (UID: \"1446a2a9-8c10-4801-a3ce-2e08d66c81b2\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.365409 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6p6wr\" (UniqueName: \"kubernetes.io/projected/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-kube-api-access-6p6wr\") pod \"collect-profiles-29401005-fjmdj\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.365431 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/91fee7b5-f700-4555-9ced-964fa79ba338-etcd-ca\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.365447 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.365996 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1446a2a9-8c10-4801-a3ce-2e08d66c81b2-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wdn94\" (UID: \"1446a2a9-8c10-4801-a3ce-2e08d66c81b2\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.366119 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/868f657f-d9b7-43c8-a706-a7657f16ce42-config\") pod \"machine-api-operator-5694c8668f-2x94m\" (UID: \"868f657f-d9b7-43c8-a706-a7657f16ce42\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.366267 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/21fffc77-e724-4f48-ac20-f21104224241-audit-dir\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.366380 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.366565 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/9945a2a9-7f64-4d7c-bab3-aca70803734d-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.366587 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/494bd606-c814-41b7-8c9a-f89487408a08-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-txqrb\" (UID: \"494bd606-c814-41b7-8c9a-f89487408a08\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.366829 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/9945a2a9-7f64-4d7c-bab3-aca70803734d-audit-policies\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.367110 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.368119 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/9945a2a9-7f64-4d7c-bab3-aca70803734d-encryption-config\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.368517 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e-serving-cert\") pod \"console-operator-58897d9998-t6qks\" (UID: \"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e\") " pod="openshift-console-operator/console-operator-58897d9998-t6qks"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.369068 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1446a2a9-8c10-4801-a3ce-2e08d66c81b2-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wdn94\" (UID: \"1446a2a9-8c10-4801-a3ce-2e08d66c81b2\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.369270 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/91fee7b5-f700-4555-9ced-964fa79ba338-serving-cert\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.369729 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37c802fe-446b-4a2f-a17d-6db1eafb0318-serving-cert\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.373834 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.374370 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/868f657f-d9b7-43c8-a706-a7657f16ce42-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-2x94m\" (UID: \"868f657f-d9b7-43c8-a706-a7657f16ce42\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.377514 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.383522 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa23081e-8667-4a54-a39f-8a8073436dd9-serving-cert\") pod \"openshift-config-operator-7777fb866f-c2l58\" (UID: \"aa23081e-8667-4a54-a39f-8a8073436dd9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.393493 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.413178 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.434202 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.439819 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/7710aca4-ceb7-4162-90b5-f8adc32e49bf-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-5lgl4\" (UID: \"7710aca4-ceb7-4162-90b5-f8adc32e49bf\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.453779 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466235 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b97ce800-9e29-48e1-8047-83e363a75a16-profile-collector-cert\") pod \"catalog-operator-68c6474976-c49xs\" (UID: \"b97ce800-9e29-48e1-8047-83e363a75a16\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466285 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/831022da-9b12-412f-b477-9c592428fe60-proxy-tls\") pod \"machine-config-operator-74547568cd-pbnhb\" (UID: \"831022da-9b12-412f-b477-9c592428fe60\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466304 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hld9k\" (UniqueName: \"kubernetes.io/projected/b500c768-0c0e-4a28-8809-5e181e03bc5c-kube-api-access-hld9k\") pod \"service-ca-9c57cc56f-t9z5r\" (UID: \"b500c768-0c0e-4a28-8809-5e181e03bc5c\") " pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466331 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vm7f2\" (UniqueName: \"kubernetes.io/projected/b97ce800-9e29-48e1-8047-83e363a75a16-kube-api-access-vm7f2\") pod \"catalog-operator-68c6474976-c49xs\" (UID: \"b97ce800-9e29-48e1-8047-83e363a75a16\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466348 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b500c768-0c0e-4a28-8809-5e181e03bc5c-signing-key\") pod \"service-ca-9c57cc56f-t9z5r\" (UID: \"b500c768-0c0e-4a28-8809-5e181e03bc5c\") " pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466380 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6p6wr\" (UniqueName: \"kubernetes.io/projected/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-kube-api-access-6p6wr\") pod \"collect-profiles-29401005-fjmdj\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466411 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5d011f07-46e6-4102-bec8-022da84881ac-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-d8tft\" (UID: \"5d011f07-46e6-4102-bec8-022da84881ac\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466430 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57msn\" (UniqueName: \"kubernetes.io/projected/62c5310d-d281-4096-b5ae-fbdd368daa44-kube-api-access-57msn\") pod \"dns-operator-744455d44c-42gdf\" (UID: \"62c5310d-d281-4096-b5ae-fbdd368daa44\") " pod="openshift-dns-operator/dns-operator-744455d44c-42gdf"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466454 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj6h7\" (UniqueName: \"kubernetes.io/projected/a30b3fac-e050-452c-8806-2120c8a6fe6b-kube-api-access-tj6h7\") pod \"control-plane-machine-set-operator-78cbb6b69f-4dn4j\" (UID: \"a30b3fac-e050-452c-8806-2120c8a6fe6b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466475 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a30b3fac-e050-452c-8806-2120c8a6fe6b-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4dn4j\" (UID: \"a30b3fac-e050-452c-8806-2120c8a6fe6b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466505 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b97ce800-9e29-48e1-8047-83e363a75a16-srv-cert\") pod \"catalog-operator-68c6474976-c49xs\" (UID: \"b97ce800-9e29-48e1-8047-83e363a75a16\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466521 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qf2rf\" (UniqueName: \"kubernetes.io/projected/5d011f07-46e6-4102-bec8-022da84881ac-kube-api-access-qf2rf\") pod \"machine-config-controller-84d6567774-d8tft\" (UID: \"5d011f07-46e6-4102-bec8-022da84881ac\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466544 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-config-volume\") pod \"collect-profiles-29401005-fjmdj\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466582 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b500c768-0c0e-4a28-8809-5e181e03bc5c-signing-cabundle\") pod \"service-ca-9c57cc56f-t9z5r\" (UID: \"b500c768-0c0e-4a28-8809-5e181e03bc5c\") " pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466626 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18c360f0-89d1-46d9-ad99-40cc04b88546-config\") pod \"service-ca-operator-777779d784-9lhfl\" (UID: \"18c360f0-89d1-46d9-ad99-40cc04b88546\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466660 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/62c5310d-d281-4096-b5ae-fbdd368daa44-metrics-tls\") pod \"dns-operator-744455d44c-42gdf\" (UID: \"62c5310d-d281-4096-b5ae-fbdd368daa44\") " pod="openshift-dns-operator/dns-operator-744455d44c-42gdf"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466677 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmx5f\" (UniqueName: \"kubernetes.io/projected/18c360f0-89d1-46d9-ad99-40cc04b88546-kube-api-access-rmx5f\") pod \"service-ca-operator-777779d784-9lhfl\" (UID: \"18c360f0-89d1-46d9-ad99-40cc04b88546\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466731 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/18c360f0-89d1-46d9-ad99-40cc04b88546-serving-cert\") pod \"service-ca-operator-777779d784-9lhfl\" (UID: \"18c360f0-89d1-46d9-ad99-40cc04b88546\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466747 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/831022da-9b12-412f-b477-9c592428fe60-images\") pod \"machine-config-operator-74547568cd-pbnhb\" (UID: \"831022da-9b12-412f-b477-9c592428fe60\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466762 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5d011f07-46e6-4102-bec8-022da84881ac-proxy-tls\") pod \"machine-config-controller-84d6567774-d8tft\" (UID: \"5d011f07-46e6-4102-bec8-022da84881ac\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466793 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/831022da-9b12-412f-b477-9c592428fe60-auth-proxy-config\") pod \"machine-config-operator-74547568cd-pbnhb\" (UID: \"831022da-9b12-412f-b477-9c592428fe60\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466809 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8p2j\" (UniqueName: \"kubernetes.io/projected/831022da-9b12-412f-b477-9c592428fe60-kube-api-access-j8p2j\") pod \"machine-config-operator-74547568cd-pbnhb\" (UID: \"831022da-9b12-412f-b477-9c592428fe60\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.466825 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-secret-volume\") pod \"collect-profiles-29401005-fjmdj\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.467403 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/831022da-9b12-412f-b477-9c592428fe60-auth-proxy-config\") pod \"machine-config-operator-74547568cd-pbnhb\" (UID: \"831022da-9b12-412f-b477-9c592428fe60\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.467823 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5d011f07-46e6-4102-bec8-022da84881ac-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-d8tft\" (UID: \"5d011f07-46e6-4102-bec8-022da84881ac\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.473263 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.493958 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.501532 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0ccea157-c5a1-4e31-958e-095aa3b77b80-metrics-tls\") pod \"ingress-operator-5b745b69d9-9w9dr\" (UID: \"0ccea157-c5a1-4e31-958e-095aa3b77b80\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.518989 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.524819 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0ccea157-c5a1-4e31-958e-095aa3b77b80-trusted-ca\") pod \"ingress-operator-5b745b69d9-9w9dr\" (UID: \"0ccea157-c5a1-4e31-958e-095aa3b77b80\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.533275 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.555920 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.559846 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/333e3fc9-dd6b-4295-9555-1f8c66440d44-config\") pod \"kube-controller-manager-operator-78b949d7b-cr7gs\" (UID: \"333e3fc9-dd6b-4295-9555-1f8c66440d44\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.574017 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.593153 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.599360 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/333e3fc9-dd6b-4295-9555-1f8c66440d44-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-cr7gs\" (UID: \"333e3fc9-dd6b-4295-9555-1f8c66440d44\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.613562 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.634884 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.654646 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.673737 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.693931 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.713629 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.733747 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.755135 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.774865 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.794476 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.815258 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.834558 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.854869 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.876019 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.894672 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.914418 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.934448 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.956937 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.974129 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 25 08:51:24 crc kubenswrapper[4932]: I1125 08:51:24.995541 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.014658 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.021039 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5d011f07-46e6-4102-bec8-022da84881ac-proxy-tls\") pod \"machine-config-controller-84d6567774-d8tft\" (UID: \"5d011f07-46e6-4102-bec8-022da84881ac\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.034535 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.081640 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jb7sw\" (UniqueName: \"kubernetes.io/projected/157b16a5-6638-4f93-b6ae-616cadd9eb21-kube-api-access-jb7sw\") pod \"apiserver-76f77b778f-zffp5\" (UID: \"157b16a5-6638-4f93-b6ae-616cadd9eb21\") " pod="openshift-apiserver/apiserver-76f77b778f-zffp5"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.100073 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twnjh\" (UniqueName: \"kubernetes.io/projected/66f325cc-3180-4c77-afdc-7a642717d31f-kube-api-access-twnjh\") pod \"controller-manager-879f6c89f-ctx5h\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.113705 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.117454 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/831022da-9b12-412f-b477-9c592428fe60-images\") pod \"machine-config-operator-74547568cd-pbnhb\" (UID: \"831022da-9b12-412f-b477-9c592428fe60\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.134634 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.141433 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/831022da-9b12-412f-b477-9c592428fe60-proxy-tls\") pod \"machine-config-operator-74547568cd-pbnhb\" (UID: \"831022da-9b12-412f-b477-9c592428fe60\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.154360 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.174042 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.193895 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.202711 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/62c5310d-d281-4096-b5ae-fbdd368daa44-metrics-tls\") pod \"dns-operator-744455d44c-42gdf\" (UID: \"62c5310d-d281-4096-b5ae-fbdd368daa44\") " pod="openshift-dns-operator/dns-operator-744455d44c-42gdf"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.214621 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.235074 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.255785 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.272625 4932 request.go:700] Waited for 1.011450953s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-dns-operator/secrets?fieldSelector=metadata.name%3Ddns-operator-dockercfg-9mqw5&limit=500&resourceVersion=0
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.275590 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.295774 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.301685 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a30b3fac-e050-452c-8806-2120c8a6fe6b-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4dn4j\" (UID: \"a30b3fac-e050-452c-8806-2120c8a6fe6b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.314943 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.317251 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-zffp5"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.334998 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.335396 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.354620 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.376529 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.394680 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.414966 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.418936 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/b500c768-0c0e-4a28-8809-5e181e03bc5c-signing-cabundle\") pod \"service-ca-9c57cc56f-t9z5r\" (UID: \"b500c768-0c0e-4a28-8809-5e181e03bc5c\") " pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.434633 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.457015 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.462815 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/b500c768-0c0e-4a28-8809-5e181e03bc5c-signing-key\") pod \"service-ca-9c57cc56f-t9z5r\" (UID: \"b500c768-0c0e-4a28-8809-5e181e03bc5c\") " pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r"
Nov 25 08:51:25 crc kubenswrapper[4932]: E1125 08:51:25.467343 4932 configmap.go:193] Couldn't get configMap openshift-operator-lifecycle-manager/collect-profiles-config: failed to sync configmap cache: timed out waiting for the condition
Nov 25 08:51:25 crc kubenswrapper[4932]: E1125 08:51:25.467439 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-config-volume podName:ca7ad64f-5d34-4269-9faf-46bc2e3cab93 nodeName:}" failed. No retries permitted until 2025-11-25 08:51:25.967412942 +0000 UTC m=+146.093442515 (durationBeforeRetry 500ms).
Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-config-volume") pod "collect-profiles-29401005-fjmdj" (UID: "ca7ad64f-5d34-4269-9faf-46bc2e3cab93") : failed to sync configmap cache: timed out waiting for the condition Nov 25 08:51:25 crc kubenswrapper[4932]: E1125 08:51:25.471557 4932 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 08:51:25 crc kubenswrapper[4932]: E1125 08:51:25.471646 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b97ce800-9e29-48e1-8047-83e363a75a16-srv-cert podName:b97ce800-9e29-48e1-8047-83e363a75a16 nodeName:}" failed. No retries permitted until 2025-11-25 08:51:25.971624171 +0000 UTC m=+146.097653754 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/b97ce800-9e29-48e1-8047-83e363a75a16-srv-cert") pod "catalog-operator-68c6474976-c49xs" (UID: "b97ce800-9e29-48e1-8047-83e363a75a16") : failed to sync secret cache: timed out waiting for the condition Nov 25 08:51:25 crc kubenswrapper[4932]: E1125 08:51:25.471676 4932 secret.go:188] Couldn't get secret openshift-service-ca-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 08:51:25 crc kubenswrapper[4932]: E1125 08:51:25.471705 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/18c360f0-89d1-46d9-ad99-40cc04b88546-serving-cert podName:18c360f0-89d1-46d9-ad99-40cc04b88546 nodeName:}" failed. No retries permitted until 2025-11-25 08:51:25.971696994 +0000 UTC m=+146.097726567 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/18c360f0-89d1-46d9-ad99-40cc04b88546-serving-cert") pod "service-ca-operator-777779d784-9lhfl" (UID: "18c360f0-89d1-46d9-ad99-40cc04b88546") : failed to sync secret cache: timed out waiting for the condition Nov 25 08:51:25 crc kubenswrapper[4932]: E1125 08:51:25.471865 4932 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 08:51:25 crc kubenswrapper[4932]: E1125 08:51:25.471897 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-secret-volume podName:ca7ad64f-5d34-4269-9faf-46bc2e3cab93 nodeName:}" failed. No retries permitted until 2025-11-25 08:51:25.971888711 +0000 UTC m=+146.097918294 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "secret-volume" (UniqueName: "kubernetes.io/secret/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-secret-volume") pod "collect-profiles-29401005-fjmdj" (UID: "ca7ad64f-5d34-4269-9faf-46bc2e3cab93") : failed to sync secret cache: timed out waiting for the condition Nov 25 08:51:25 crc kubenswrapper[4932]: E1125 08:51:25.471948 4932 configmap.go:193] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 25 08:51:25 crc kubenswrapper[4932]: E1125 08:51:25.472024 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/18c360f0-89d1-46d9-ad99-40cc04b88546-config podName:18c360f0-89d1-46d9-ad99-40cc04b88546 nodeName:}" failed. 
No retries permitted until 2025-11-25 08:51:25.972000836 +0000 UTC m=+146.098030419 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/18c360f0-89d1-46d9-ad99-40cc04b88546-config") pod "service-ca-operator-777779d784-9lhfl" (UID: "18c360f0-89d1-46d9-ad99-40cc04b88546") : failed to sync configmap cache: timed out waiting for the condition Nov 25 08:51:25 crc kubenswrapper[4932]: E1125 08:51:25.472222 4932 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/pprof-cert: failed to sync secret cache: timed out waiting for the condition Nov 25 08:51:25 crc kubenswrapper[4932]: E1125 08:51:25.472261 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/b97ce800-9e29-48e1-8047-83e363a75a16-profile-collector-cert podName:b97ce800-9e29-48e1-8047-83e363a75a16 nodeName:}" failed. No retries permitted until 2025-11-25 08:51:25.972249606 +0000 UTC m=+146.098279189 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "profile-collector-cert" (UniqueName: "kubernetes.io/secret/b97ce800-9e29-48e1-8047-83e363a75a16-profile-collector-cert") pod "catalog-operator-68c6474976-c49xs" (UID: "b97ce800-9e29-48e1-8047-83e363a75a16") : failed to sync secret cache: timed out waiting for the condition Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.473942 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.496040 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.513919 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.534322 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.553661 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.561985 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ctx5h"] Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.577966 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.596886 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.607203 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-zffp5"] Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.613725 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 08:51:25 crc kubenswrapper[4932]: W1125 08:51:25.617048 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod157b16a5_6638_4f93_b6ae_616cadd9eb21.slice/crio-b1a8324afd74a9a3486e79f3d40b06f2cb336c8576e22c3758162b0f27b67019 WatchSource:0}: 
Error finding container b1a8324afd74a9a3486e79f3d40b06f2cb336c8576e22c3758162b0f27b67019: Status 404 returned error can't find the container with id b1a8324afd74a9a3486e79f3d40b06f2cb336c8576e22c3758162b0f27b67019 Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.634197 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.655736 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.673403 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.694442 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.714428 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.744115 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.755058 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.773670 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.848928 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.849059 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.851324 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.855048 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.874243 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.894275 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.914253 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.933789 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.953904 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.992875 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/18c360f0-89d1-46d9-ad99-40cc04b88546-serving-cert\") pod \"service-ca-operator-777779d784-9lhfl\" (UID: \"18c360f0-89d1-46d9-ad99-40cc04b88546\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.993069 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-secret-volume\") pod \"collect-profiles-29401005-fjmdj\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.993946 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.994007 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b97ce800-9e29-48e1-8047-83e363a75a16-profile-collector-cert\") pod \"catalog-operator-68c6474976-c49xs\" (UID: \"b97ce800-9e29-48e1-8047-83e363a75a16\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.994374 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b97ce800-9e29-48e1-8047-83e363a75a16-srv-cert\") pod \"catalog-operator-68c6474976-c49xs\" (UID: \"b97ce800-9e29-48e1-8047-83e363a75a16\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.994446 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-config-volume\") pod \"collect-profiles-29401005-fjmdj\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.994588 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18c360f0-89d1-46d9-ad99-40cc04b88546-config\") pod \"service-ca-operator-777779d784-9lhfl\" (UID: \"18c360f0-89d1-46d9-ad99-40cc04b88546\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.995802 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-config-volume\") pod \"collect-profiles-29401005-fjmdj\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" Nov 25 08:51:25 crc kubenswrapper[4932]: I1125 08:51:25.996074 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/18c360f0-89d1-46d9-ad99-40cc04b88546-config\") pod \"service-ca-operator-777779d784-9lhfl\" (UID: \"18c360f0-89d1-46d9-ad99-40cc04b88546\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.000113 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/b97ce800-9e29-48e1-8047-83e363a75a16-srv-cert\") pod \"catalog-operator-68c6474976-c49xs\" (UID: \"b97ce800-9e29-48e1-8047-83e363a75a16\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.001180 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-secret-volume\") pod \"collect-profiles-29401005-fjmdj\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.001227 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b97ce800-9e29-48e1-8047-83e363a75a16-profile-collector-cert\") pod \"catalog-operator-68c6474976-c49xs\" (UID: \"b97ce800-9e29-48e1-8047-83e363a75a16\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.002180 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/18c360f0-89d1-46d9-ad99-40cc04b88546-serving-cert\") pod \"service-ca-operator-777779d784-9lhfl\" (UID: \"18c360f0-89d1-46d9-ad99-40cc04b88546\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.014144 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.035572 4932 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.054375 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.075026 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.094422 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.115092 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.134837 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.155573 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.201660 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dm2nl\" (UniqueName: \"kubernetes.io/projected/7710aca4-ceb7-4162-90b5-f8adc32e49bf-kube-api-access-dm2nl\") pod \"cluster-image-registry-operator-dc59b4c8b-5lgl4\" (UID: \"7710aca4-ceb7-4162-90b5-f8adc32e49bf\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.221648 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-r9rfz\" (UniqueName: \"kubernetes.io/projected/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-kube-api-access-r9rfz\") pod \"route-controller-manager-6576b87f9c-v2fzp\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.243308 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhhb8\" (UniqueName: \"kubernetes.io/projected/a2c26b18-e2d1-44ff-91bf-44e7a6d1a097-kube-api-access-rhhb8\") pod \"cluster-samples-operator-665b6dd947-wb24h\" (UID: \"a2c26b18-e2d1-44ff-91bf-44e7a6d1a097\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.256423 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9cm2\" (UniqueName: \"kubernetes.io/projected/868f657f-d9b7-43c8-a706-a7657f16ce42-kube-api-access-l9cm2\") pod \"machine-api-operator-5694c8668f-2x94m\" (UID: \"868f657f-d9b7-43c8-a706-a7657f16ce42\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.274589 4932 request.go:700] Waited for 1.914091152s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/serviceaccounts/ingress-operator/token Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.283560 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rpjw\" (UniqueName: \"kubernetes.io/projected/494bd606-c814-41b7-8c9a-f89487408a08-kube-api-access-5rpjw\") pod \"openshift-apiserver-operator-796bbdcf4f-txqrb\" (UID: \"494bd606-c814-41b7-8c9a-f89487408a08\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.297921 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6trv6\" (UniqueName: \"kubernetes.io/projected/0ccea157-c5a1-4e31-958e-095aa3b77b80-kube-api-access-6trv6\") pod \"ingress-operator-5b745b69d9-9w9dr\" (UID: \"0ccea157-c5a1-4e31-958e-095aa3b77b80\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.312989 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czwl2\" (UniqueName: \"kubernetes.io/projected/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-kube-api-access-czwl2\") pod \"console-f9d7485db-5kf8q\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.313182 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.331577 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcb76\" (UniqueName: \"kubernetes.io/projected/37c802fe-446b-4a2f-a17d-6db1eafb0318-kube-api-access-kcb76\") pod \"authentication-operator-69f744f599-jdjgs\" (UID: \"37c802fe-446b-4a2f-a17d-6db1eafb0318\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.335402 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.348204 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0ccea157-c5a1-4e31-958e-095aa3b77b80-bound-sa-token\") pod \"ingress-operator-5b745b69d9-9w9dr\" (UID: \"0ccea157-c5a1-4e31-958e-095aa3b77b80\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.369321 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttvsx\" (UniqueName: \"kubernetes.io/projected/ba443197-0445-4bc8-915a-d4d6f49bdea7-kube-api-access-ttvsx\") pod \"openshift-controller-manager-operator-756b6f6bc6-kjf9j\" (UID: \"ba443197-0445-4bc8-915a-d4d6f49bdea7\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.390479 4932 generic.go:334] "Generic (PLEG): container finished" podID="157b16a5-6638-4f93-b6ae-616cadd9eb21" containerID="640c4ce1705636da119907f0aa89ba4bac3d2311cfdda6ba84b108fd599bb972" exitCode=0 Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.390630 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-zffp5" event={"ID":"157b16a5-6638-4f93-b6ae-616cadd9eb21","Type":"ContainerDied","Data":"640c4ce1705636da119907f0aa89ba4bac3d2311cfdda6ba84b108fd599bb972"} Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.390674 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-zffp5" event={"ID":"157b16a5-6638-4f93-b6ae-616cadd9eb21","Type":"ContainerStarted","Data":"b1a8324afd74a9a3486e79f3d40b06f2cb336c8576e22c3758162b0f27b67019"} Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.391721 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6d9jl\" (UniqueName: \"kubernetes.io/projected/9945a2a9-7f64-4d7c-bab3-aca70803734d-kube-api-access-6d9jl\") pod \"apiserver-7bbb656c7d-4g2lq\" (UID: \"9945a2a9-7f64-4d7c-bab3-aca70803734d\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.392549 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" event={"ID":"66f325cc-3180-4c77-afdc-7a642717d31f","Type":"ContainerStarted","Data":"c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681"} Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.392598 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" event={"ID":"66f325cc-3180-4c77-afdc-7a642717d31f","Type":"ContainerStarted","Data":"4b16452645b1969534d63060a7655e5bad74a9ca36987dbd7b27c4f4ba1bca4f"} Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.396104 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.396578 4932 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-ctx5h container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.396610 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" podUID="66f325cc-3180-4c77-afdc-7a642717d31f" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.396666 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.401555 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.405661 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.408685 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7710aca4-ceb7-4162-90b5-f8adc32e49bf-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-5lgl4\" (UID: \"7710aca4-ceb7-4162-90b5-f8adc32e49bf\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.426966 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cntkl\" (UniqueName: \"kubernetes.io/projected/91fee7b5-f700-4555-9ced-964fa79ba338-kube-api-access-cntkl\") pod \"etcd-operator-b45778765-2q4hk\" (UID: \"91fee7b5-f700-4555-9ced-964fa79ba338\") " pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.432990 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.438588 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.451298 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/333e3fc9-dd6b-4295-9555-1f8c66440d44-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-cr7gs\" (UID: \"333e3fc9-dd6b-4295-9555-1f8c66440d44\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.453358 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.458704 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.469281 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khl2w\" (UniqueName: \"kubernetes.io/projected/21fffc77-e724-4f48-ac20-f21104224241-kube-api-access-khl2w\") pod \"oauth-openshift-558db77b4-ksq5j\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.493972 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64w4s\" (UniqueName: \"kubernetes.io/projected/9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e-kube-api-access-64w4s\") pod \"console-operator-58897d9998-t6qks\" (UID: \"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e\") " pod="openshift-console-operator/console-operator-58897d9998-t6qks" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.500799 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.505439 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.505732 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-2x94m"] Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.511422 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1446a2a9-8c10-4801-a3ce-2e08d66c81b2-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-wdn94\" (UID: \"1446a2a9-8c10-4801-a3ce-2e08d66c81b2\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.546376 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bp4mx\" (UniqueName: \"kubernetes.io/projected/aa23081e-8667-4a54-a39f-8a8073436dd9-kube-api-access-bp4mx\") pod \"openshift-config-operator-7777fb866f-c2l58\" (UID: \"aa23081e-8667-4a54-a39f-8a8073436dd9\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58" Nov 25 08:51:26 crc kubenswrapper[4932]: W1125 08:51:26.550830 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod868f657f_d9b7_43c8_a706_a7657f16ce42.slice/crio-a0f4a039ca76aad490ec478db99c1fdf622ac26dbcbb68355a604771b0d1fa87 WatchSource:0}: Error finding container a0f4a039ca76aad490ec478db99c1fdf622ac26dbcbb68355a604771b0d1fa87: Status 404 returned error can't find the container with id a0f4a039ca76aad490ec478db99c1fdf622ac26dbcbb68355a604771b0d1fa87 Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.568420 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9m8zz\" (UniqueName: \"kubernetes.io/projected/1688eab6-98cb-4e8e-97c5-f14a2fa0db76-kube-api-access-9m8zz\") pod \"downloads-7954f5f757-qtltp\" (UID: \"1688eab6-98cb-4e8e-97c5-f14a2fa0db76\") " pod="openshift-console/downloads-7954f5f757-qtltp" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.570508 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xmp6c\" (UniqueName: \"kubernetes.io/projected/e5307db8-5382-4953-bf7e-9b9cc2b0d4c6-kube-api-access-xmp6c\") pod \"machine-approver-56656f9798-vmxsf\" (UID: \"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.588453 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6p6wr\" (UniqueName: \"kubernetes.io/projected/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-kube-api-access-6p6wr\") pod \"collect-profiles-29401005-fjmdj\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.590027 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-qtltp" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.612124 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.630255 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vm7f2\" (UniqueName: \"kubernetes.io/projected/b97ce800-9e29-48e1-8047-83e363a75a16-kube-api-access-vm7f2\") pod \"catalog-operator-68c6474976-c49xs\" (UID: \"b97ce800-9e29-48e1-8047-83e363a75a16\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.631401 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hld9k\" (UniqueName: \"kubernetes.io/projected/b500c768-0c0e-4a28-8809-5e181e03bc5c-kube-api-access-hld9k\") pod \"service-ca-9c57cc56f-t9z5r\" (UID: \"b500c768-0c0e-4a28-8809-5e181e03bc5c\") " pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.647026 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb"] Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.647063 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp"] Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.648172 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj6h7\" (UniqueName: \"kubernetes.io/projected/a30b3fac-e050-452c-8806-2120c8a6fe6b-kube-api-access-tj6h7\") pod \"control-plane-machine-set-operator-78cbb6b69f-4dn4j\" (UID: \"a30b3fac-e050-452c-8806-2120c8a6fe6b\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j" Nov 25 08:51:26 crc kubenswrapper[4932]: W1125 08:51:26.659068 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod494bd606_c814_41b7_8c9a_f89487408a08.slice/crio-bacfe4727bff3e6cb3631c44dfeaaeb8678a00b724d01db60d1d3df79132a182 WatchSource:0}: Error finding container bacfe4727bff3e6cb3631c44dfeaaeb8678a00b724d01db60d1d3df79132a182: Status 404 returned error can't find the container with id bacfe4727bff3e6cb3631c44dfeaaeb8678a00b724d01db60d1d3df79132a182 Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.664797 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.670048 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57msn\" (UniqueName: \"kubernetes.io/projected/62c5310d-d281-4096-b5ae-fbdd368daa44-kube-api-access-57msn\") pod \"dns-operator-744455d44c-42gdf\" (UID: \"62c5310d-d281-4096-b5ae-fbdd368daa44\") " pod="openshift-dns-operator/dns-operator-744455d44c-42gdf" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.685880 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:26 crc kubenswrapper[4932]: W1125 08:51:26.691988 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7f76ccd_1388_46e1_b71c_0b4352d86eaf.slice/crio-74c65febdda5505ab33581ad1aa2f4b6ce7c459d2b50c6ef87ccb9bb1423145c WatchSource:0}: Error finding container 74c65febdda5505ab33581ad1aa2f4b6ce7c459d2b50c6ef87ccb9bb1423145c: Status 404 returned error can't find the container with id 74c65febdda5505ab33581ad1aa2f4b6ce7c459d2b50c6ef87ccb9bb1423145c Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.699461 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qf2rf\" (UniqueName: \"kubernetes.io/projected/5d011f07-46e6-4102-bec8-022da84881ac-kube-api-access-qf2rf\") pod \"machine-config-controller-84d6567774-d8tft\" (UID: \"5d011f07-46e6-4102-bec8-022da84881ac\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.713734 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.714388 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmx5f\" (UniqueName: \"kubernetes.io/projected/18c360f0-89d1-46d9-ad99-40cc04b88546-kube-api-access-rmx5f\") pod \"service-ca-operator-777779d784-9lhfl\" (UID: \"18c360f0-89d1-46d9-ad99-40cc04b88546\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.719172 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-t6qks" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.725429 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.730743 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8p2j\" (UniqueName: \"kubernetes.io/projected/831022da-9b12-412f-b477-9c592428fe60-kube-api-access-j8p2j\") pod \"machine-config-operator-74547568cd-pbnhb\" (UID: \"831022da-9b12-412f-b477-9c592428fe60\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.746868 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.795077 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h"] Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.807819 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/38dc74a2-971b-4e32-a1bd-d63805c021d5-srv-cert\") pod \"olm-operator-6b444d44fb-f8qtz\" (UID: \"38dc74a2-971b-4e32-a1bd-d63805c021d5\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.807861 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfkcd\" (UniqueName: \"kubernetes.io/projected/f72fef44-52e3-46ce-b5c7-1e7d32b6c50b-kube-api-access-zfkcd\") pod \"kube-storage-version-migrator-operator-b67b599dd-mtws7\" (UID: \"f72fef44-52e3-46ce-b5c7-1e7d32b6c50b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.807923 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/38dc74a2-971b-4e32-a1bd-d63805c021d5-profile-collector-cert\") pod \"olm-operator-6b444d44fb-f8qtz\" (UID: \"38dc74a2-971b-4e32-a1bd-d63805c021d5\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.807948 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/936fd4c5-860b-458e-a7cf-4feef1157c06-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-24l97\" (UID: \"936fd4c5-860b-458e-a7cf-4feef1157c06\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.807985 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttxdx\" (UniqueName: \"kubernetes.io/projected/38dc74a2-971b-4e32-a1bd-d63805c021d5-kube-api-access-ttxdx\") pod \"olm-operator-6b444d44fb-f8qtz\" (UID: \"38dc74a2-971b-4e32-a1bd-d63805c021d5\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808012 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/61828b1b-84ea-4648-ad3c-ab4c3c592743-metrics-certs\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808052 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czn85\" (UniqueName: \"kubernetes.io/projected/61828b1b-84ea-4648-ad3c-ab4c3c592743-kube-api-access-czn85\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808067 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d78b9892-02be-43e9-b63e-acefa1fff4b3-webhook-cert\") pod \"packageserver-d55dfcdfc-tj2jk\" (UID: \"d78b9892-02be-43e9-b63e-acefa1fff4b3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808109 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-bound-sa-token\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808124 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/d78b9892-02be-43e9-b63e-acefa1fff4b3-tmpfs\") pod \"packageserver-d55dfcdfc-tj2jk\" (UID: \"d78b9892-02be-43e9-b63e-acefa1fff4b3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808172 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pvwm\" (UniqueName: \"kubernetes.io/projected/d78b9892-02be-43e9-b63e-acefa1fff4b3-kube-api-access-7pvwm\") pod \"packageserver-d55dfcdfc-tj2jk\" (UID: \"d78b9892-02be-43e9-b63e-acefa1fff4b3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808253 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3776cbbe-bbc8-430b-8db5-881918c75fb2-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ktv2t\" (UID: \"3776cbbe-bbc8-430b-8db5-881918c75fb2\") " pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808277 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-registry-tls\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808293 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5x72s\" (UniqueName: \"kubernetes.io/projected/c88f270c-7870-450c-b838-215ad7b41078-kube-api-access-5x72s\") pod \"migrator-59844c95c7-k6wpr\" (UID: \"c88f270c-7870-450c-b838-215ad7b41078\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k6wpr" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808333 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/61828b1b-84ea-4648-ad3c-ab4c3c592743-default-certificate\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808363 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3776cbbe-bbc8-430b-8db5-881918c75fb2-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ktv2t\" (UID: \"3776cbbe-bbc8-430b-8db5-881918c75fb2\") " pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808403 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808427 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5ch4\" (UniqueName: \"kubernetes.io/projected/c235a252-2ba4-4b5f-b04a-ca53a476ebde-kube-api-access-d5ch4\") pod \"multus-admission-controller-857f4d67dd-sbxc8\" (UID: \"c235a252-2ba4-4b5f-b04a-ca53a476ebde\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sbxc8" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808448 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgrmq\" (UniqueName: \"kubernetes.io/projected/936fd4c5-860b-458e-a7cf-4feef1157c06-kube-api-access-cgrmq\") pod \"package-server-manager-789f6589d5-24l97\" (UID: \"936fd4c5-860b-458e-a7cf-4feef1157c06\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808492 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzcjb\" (UniqueName: \"kubernetes.io/projected/3776cbbe-bbc8-430b-8db5-881918c75fb2-kube-api-access-rzcjb\") pod \"marketplace-operator-79b997595-ktv2t\" (UID: \"3776cbbe-bbc8-430b-8db5-881918c75fb2\") " pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808516 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d78b9892-02be-43e9-b63e-acefa1fff4b3-apiservice-cert\") pod \"packageserver-d55dfcdfc-tj2jk\" (UID: \"d78b9892-02be-43e9-b63e-acefa1fff4b3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808530 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dbba1e90-6d95-4837-b776-6a03a2e7901a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808545 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dbba1e90-6d95-4837-b776-6a03a2e7901a-trusted-ca\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808578 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f72fef44-52e3-46ce-b5c7-1e7d32b6c50b-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-mtws7\" (UID: \"f72fef44-52e3-46ce-b5c7-1e7d32b6c50b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808635 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3ec77ce-9c00-4f38-b4e3-1c3a715a321e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-cf4q4\" (UID: \"a3ec77ce-9c00-4f38-b4e3-1c3a715a321e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808649 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c235a252-2ba4-4b5f-b04a-ca53a476ebde-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-sbxc8\" (UID: \"c235a252-2ba4-4b5f-b04a-ca53a476ebde\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sbxc8" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808664 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3ec77ce-9c00-4f38-b4e3-1c3a715a321e-config\") pod \"kube-apiserver-operator-766d6c64bb-cf4q4\" (UID: \"a3ec77ce-9c00-4f38-b4e3-1c3a715a321e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808681 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/61828b1b-84ea-4648-ad3c-ab4c3c592743-stats-auth\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.808720 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61828b1b-84ea-4648-ad3c-ab4c3c592743-service-ca-bundle\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.810086 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/54acd6a4-8682-4697-9985-c5a0132c9307-cert\") pod \"ingress-canary-zvnwp\" (UID: \"54acd6a4-8682-4697-9985-c5a0132c9307\") " pod="openshift-ingress-canary/ingress-canary-zvnwp" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.810107 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8lfz\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-kube-api-access-t8lfz\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.810157 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tmn5\" (UniqueName: \"kubernetes.io/projected/54acd6a4-8682-4697-9985-c5a0132c9307-kube-api-access-4tmn5\") pod \"ingress-canary-zvnwp\" (UID: \"54acd6a4-8682-4697-9985-c5a0132c9307\") " pod="openshift-ingress-canary/ingress-canary-zvnwp" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.810199 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f72fef44-52e3-46ce-b5c7-1e7d32b6c50b-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-mtws7\" (UID: \"f72fef44-52e3-46ce-b5c7-1e7d32b6c50b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.810249 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dbba1e90-6d95-4837-b776-6a03a2e7901a-registry-certificates\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.810265 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dbba1e90-6d95-4837-b776-6a03a2e7901a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.810282 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a3ec77ce-9c00-4f38-b4e3-1c3a715a321e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-cf4q4\" (UID: \"a3ec77ce-9c00-4f38-b4e3-1c3a715a321e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" Nov 25 08:51:26 crc kubenswrapper[4932]: E1125 08:51:26.815535 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:27.315519638 +0000 UTC m=+147.441549201 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.840435 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.844419 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.855657 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-42gdf" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.867780 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.884016 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.905972 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912125 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912337 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dbba1e90-6d95-4837-b776-6a03a2e7901a-registry-certificates\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912365 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dbba1e90-6d95-4837-b776-6a03a2e7901a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912382 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a3ec77ce-9c00-4f38-b4e3-1c3a715a321e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-cf4q4\" (UID: \"a3ec77ce-9c00-4f38-b4e3-1c3a715a321e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912471 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjbs7\" (UniqueName: \"kubernetes.io/projected/01b0008e-1763-406f-b4c4-0ba777086a02-kube-api-access-xjbs7\") pod \"machine-config-server-mlf5b\" (UID: \"01b0008e-1763-406f-b4c4-0ba777086a02\") " pod="openshift-machine-config-operator/machine-config-server-mlf5b" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912489 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/38dc74a2-971b-4e32-a1bd-d63805c021d5-srv-cert\") pod \"olm-operator-6b444d44fb-f8qtz\" (UID: \"38dc74a2-971b-4e32-a1bd-d63805c021d5\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912512 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfkcd\" (UniqueName: \"kubernetes.io/projected/f72fef44-52e3-46ce-b5c7-1e7d32b6c50b-kube-api-access-zfkcd\") pod 
\"kube-storage-version-migrator-operator-b67b599dd-mtws7\" (UID: \"f72fef44-52e3-46ce-b5c7-1e7d32b6c50b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912558 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/38dc74a2-971b-4e32-a1bd-d63805c021d5-profile-collector-cert\") pod \"olm-operator-6b444d44fb-f8qtz\" (UID: \"38dc74a2-971b-4e32-a1bd-d63805c021d5\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912576 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-mountpoint-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912603 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/936fd4c5-860b-458e-a7cf-4feef1157c06-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-24l97\" (UID: \"936fd4c5-860b-458e-a7cf-4feef1157c06\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912620 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/01b0008e-1763-406f-b4c4-0ba777086a02-certs\") pod \"machine-config-server-mlf5b\" (UID: \"01b0008e-1763-406f-b4c4-0ba777086a02\") " pod="openshift-machine-config-operator/machine-config-server-mlf5b" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912673 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttxdx\" (UniqueName: \"kubernetes.io/projected/38dc74a2-971b-4e32-a1bd-d63805c021d5-kube-api-access-ttxdx\") pod \"olm-operator-6b444d44fb-f8qtz\" (UID: \"38dc74a2-971b-4e32-a1bd-d63805c021d5\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912700 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/61828b1b-84ea-4648-ad3c-ab4c3c592743-metrics-certs\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912734 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czn85\" (UniqueName: \"kubernetes.io/projected/61828b1b-84ea-4648-ad3c-ab4c3c592743-kube-api-access-czn85\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912751 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d78b9892-02be-43e9-b63e-acefa1fff4b3-webhook-cert\") pod \"packageserver-d55dfcdfc-tj2jk\" (UID: \"d78b9892-02be-43e9-b63e-acefa1fff4b3\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912774 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwz2r\" (UniqueName: \"kubernetes.io/projected/45bcacf1-3275-4634-b7cd-909f2a77bc0e-kube-api-access-lwz2r\") pod \"dns-default-sdbr8\" (UID: \"45bcacf1-3275-4634-b7cd-909f2a77bc0e\") " pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912789 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-registration-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912827 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtbm7\" (UniqueName: \"kubernetes.io/projected/52c22cc7-413e-4e95-9796-59b6d6908bbf-kube-api-access-rtbm7\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912858 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-bound-sa-token\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912879 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/d78b9892-02be-43e9-b63e-acefa1fff4b3-tmpfs\") pod \"packageserver-d55dfcdfc-tj2jk\" (UID: \"d78b9892-02be-43e9-b63e-acefa1fff4b3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:26 crc kubenswrapper[4932]: E1125 08:51:26.912900 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:27.412880164 +0000 UTC m=+147.538909727 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.912939 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-socket-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913009 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pvwm\" (UniqueName: \"kubernetes.io/projected/d78b9892-02be-43e9-b63e-acefa1fff4b3-kube-api-access-7pvwm\") pod \"packageserver-d55dfcdfc-tj2jk\" (UID: \"d78b9892-02be-43e9-b63e-acefa1fff4b3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913102 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3776cbbe-bbc8-430b-8db5-881918c75fb2-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ktv2t\" (UID: \"3776cbbe-bbc8-430b-8db5-881918c75fb2\") " pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913128 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/01b0008e-1763-406f-b4c4-0ba777086a02-node-bootstrap-token\") pod \"machine-config-server-mlf5b\" (UID: \"01b0008e-1763-406f-b4c4-0ba777086a02\") " pod="openshift-machine-config-operator/machine-config-server-mlf5b" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913153 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-registry-tls\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913204 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5x72s\" (UniqueName: \"kubernetes.io/projected/c88f270c-7870-450c-b838-215ad7b41078-kube-api-access-5x72s\") pod \"migrator-59844c95c7-k6wpr\" (UID: \"c88f270c-7870-450c-b838-215ad7b41078\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k6wpr" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913255 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/61828b1b-84ea-4648-ad3c-ab4c3c592743-default-certificate\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913273 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45bcacf1-3275-4634-b7cd-909f2a77bc0e-config-volume\") pod \"dns-default-sdbr8\" (UID: \"45bcacf1-3275-4634-b7cd-909f2a77bc0e\") " pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913286 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/d78b9892-02be-43e9-b63e-acefa1fff4b3-tmpfs\") pod \"packageserver-d55dfcdfc-tj2jk\" (UID: \"d78b9892-02be-43e9-b63e-acefa1fff4b3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913301 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3776cbbe-bbc8-430b-8db5-881918c75fb2-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ktv2t\" (UID: \"3776cbbe-bbc8-430b-8db5-881918c75fb2\") " pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913324 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913343 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5ch4\" (UniqueName: \"kubernetes.io/projected/c235a252-2ba4-4b5f-b04a-ca53a476ebde-kube-api-access-d5ch4\") pod \"multus-admission-controller-857f4d67dd-sbxc8\" (UID: \"c235a252-2ba4-4b5f-b04a-ca53a476ebde\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sbxc8" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913360 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgrmq\" (UniqueName: \"kubernetes.io/projected/936fd4c5-860b-458e-a7cf-4feef1157c06-kube-api-access-cgrmq\") pod \"package-server-manager-789f6589d5-24l97\" (UID: \"936fd4c5-860b-458e-a7cf-4feef1157c06\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913418 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d78b9892-02be-43e9-b63e-acefa1fff4b3-apiservice-cert\") pod \"packageserver-d55dfcdfc-tj2jk\" (UID: \"d78b9892-02be-43e9-b63e-acefa1fff4b3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913435 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzcjb\" (UniqueName: \"kubernetes.io/projected/3776cbbe-bbc8-430b-8db5-881918c75fb2-kube-api-access-rzcjb\") pod \"marketplace-operator-79b997595-ktv2t\" (UID: \"3776cbbe-bbc8-430b-8db5-881918c75fb2\") " pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913452 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dbba1e90-6d95-4837-b776-6a03a2e7901a-ca-trust-extracted\") pod 
\"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913467 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dbba1e90-6d95-4837-b776-6a03a2e7901a-trusted-ca\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913482 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-plugins-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913552 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f72fef44-52e3-46ce-b5c7-1e7d32b6c50b-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-mtws7\" (UID: \"f72fef44-52e3-46ce-b5c7-1e7d32b6c50b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913638 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3ec77ce-9c00-4f38-b4e3-1c3a715a321e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-cf4q4\" (UID: \"a3ec77ce-9c00-4f38-b4e3-1c3a715a321e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913660 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c235a252-2ba4-4b5f-b04a-ca53a476ebde-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-sbxc8\" (UID: \"c235a252-2ba4-4b5f-b04a-ca53a476ebde\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sbxc8" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913679 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3ec77ce-9c00-4f38-b4e3-1c3a715a321e-config\") pod \"kube-apiserver-operator-766d6c64bb-cf4q4\" (UID: \"a3ec77ce-9c00-4f38-b4e3-1c3a715a321e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913694 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/61828b1b-84ea-4648-ad3c-ab4c3c592743-stats-auth\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913696 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dbba1e90-6d95-4837-b776-6a03a2e7901a-registry-certificates\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: 
I1125 08:51:26.913730 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61828b1b-84ea-4648-ad3c-ab4c3c592743-service-ca-bundle\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913749 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/45bcacf1-3275-4634-b7cd-909f2a77bc0e-metrics-tls\") pod \"dns-default-sdbr8\" (UID: \"45bcacf1-3275-4634-b7cd-909f2a77bc0e\") " pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913775 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/54acd6a4-8682-4697-9985-c5a0132c9307-cert\") pod \"ingress-canary-zvnwp\" (UID: \"54acd6a4-8682-4697-9985-c5a0132c9307\") " pod="openshift-ingress-canary/ingress-canary-zvnwp" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913826 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8lfz\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-kube-api-access-t8lfz\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913886 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tmn5\" (UniqueName: \"kubernetes.io/projected/54acd6a4-8682-4697-9985-c5a0132c9307-kube-api-access-4tmn5\") pod \"ingress-canary-zvnwp\" (UID: \"54acd6a4-8682-4697-9985-c5a0132c9307\") " pod="openshift-ingress-canary/ingress-canary-zvnwp" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913904 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-csi-data-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.913921 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f72fef44-52e3-46ce-b5c7-1e7d32b6c50b-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-mtws7\" (UID: \"f72fef44-52e3-46ce-b5c7-1e7d32b6c50b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.914568 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a3ec77ce-9c00-4f38-b4e3-1c3a715a321e-config\") pod \"kube-apiserver-operator-766d6c64bb-cf4q4\" (UID: \"a3ec77ce-9c00-4f38-b4e3-1c3a715a321e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" Nov 25 08:51:26 crc kubenswrapper[4932]: E1125 08:51:26.914619 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 08:51:27.414610023 +0000 UTC m=+147.540639576 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.917349 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3776cbbe-bbc8-430b-8db5-881918c75fb2-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-ktv2t\" (UID: \"3776cbbe-bbc8-430b-8db5-881918c75fb2\") " pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.920064 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/61828b1b-84ea-4648-ad3c-ab4c3c592743-stats-auth\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.925100 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dbba1e90-6d95-4837-b776-6a03a2e7901a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.931649 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-registry-tls\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.932234 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a3ec77ce-9c00-4f38-b4e3-1c3a715a321e-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-cf4q4\" (UID: \"a3ec77ce-9c00-4f38-b4e3-1c3a715a321e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.934970 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dbba1e90-6d95-4837-b776-6a03a2e7901a-trusted-ca\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.937369 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.940250 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/61828b1b-84ea-4648-ad3c-ab4c3c592743-metrics-certs\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.940489 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f72fef44-52e3-46ce-b5c7-1e7d32b6c50b-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-mtws7\" (UID: \"f72fef44-52e3-46ce-b5c7-1e7d32b6c50b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.940512 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3776cbbe-bbc8-430b-8db5-881918c75fb2-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-ktv2t\" (UID: \"3776cbbe-bbc8-430b-8db5-881918c75fb2\") " pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.941454 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/936fd4c5-860b-458e-a7cf-4feef1157c06-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-24l97\" (UID: \"936fd4c5-860b-458e-a7cf-4feef1157c06\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.941595 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dbba1e90-6d95-4837-b776-6a03a2e7901a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.942337 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f72fef44-52e3-46ce-b5c7-1e7d32b6c50b-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-mtws7\" (UID: \"f72fef44-52e3-46ce-b5c7-1e7d32b6c50b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.942499 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/38dc74a2-971b-4e32-a1bd-d63805c021d5-profile-collector-cert\") pod \"olm-operator-6b444d44fb-f8qtz\" (UID: \"38dc74a2-971b-4e32-a1bd-d63805c021d5\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.945355 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c235a252-2ba4-4b5f-b04a-ca53a476ebde-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-sbxc8\" (UID: \"c235a252-2ba4-4b5f-b04a-ca53a476ebde\") " 
pod="openshift-multus/multus-admission-controller-857f4d67dd-sbxc8" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.947680 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d78b9892-02be-43e9-b63e-acefa1fff4b3-webhook-cert\") pod \"packageserver-d55dfcdfc-tj2jk\" (UID: \"d78b9892-02be-43e9-b63e-acefa1fff4b3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.947962 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/61828b1b-84ea-4648-ad3c-ab4c3c592743-default-certificate\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.957199 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/54acd6a4-8682-4697-9985-c5a0132c9307-cert\") pod \"ingress-canary-zvnwp\" (UID: \"54acd6a4-8682-4697-9985-c5a0132c9307\") " pod="openshift-ingress-canary/ingress-canary-zvnwp" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.958694 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d78b9892-02be-43e9-b63e-acefa1fff4b3-apiservice-cert\") pod \"packageserver-d55dfcdfc-tj2jk\" (UID: \"d78b9892-02be-43e9-b63e-acefa1fff4b3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.958697 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/61828b1b-84ea-4648-ad3c-ab4c3c592743-service-ca-bundle\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.959456 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5ch4\" (UniqueName: \"kubernetes.io/projected/c235a252-2ba4-4b5f-b04a-ca53a476ebde-kube-api-access-d5ch4\") pod \"multus-admission-controller-857f4d67dd-sbxc8\" (UID: \"c235a252-2ba4-4b5f-b04a-ca53a476ebde\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sbxc8" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.961661 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/38dc74a2-971b-4e32-a1bd-d63805c021d5-srv-cert\") pod \"olm-operator-6b444d44fb-f8qtz\" (UID: \"38dc74a2-971b-4e32-a1bd-d63805c021d5\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.985974 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j"] Nov 25 08:51:26 crc kubenswrapper[4932]: I1125 08:51:26.989751 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgrmq\" (UniqueName: \"kubernetes.io/projected/936fd4c5-860b-458e-a7cf-4feef1157c06-kube-api-access-cgrmq\") pod \"package-server-manager-789f6589d5-24l97\" (UID: \"936fd4c5-860b-458e-a7cf-4feef1157c06\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" Nov 25 08:51:27 crc 
kubenswrapper[4932]: I1125 08:51:27.000286 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-5kf8q"] Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.001000 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttxdx\" (UniqueName: \"kubernetes.io/projected/38dc74a2-971b-4e32-a1bd-d63805c021d5-kube-api-access-ttxdx\") pod \"olm-operator-6b444d44fb-f8qtz\" (UID: \"38dc74a2-971b-4e32-a1bd-d63805c021d5\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.011143 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pvwm\" (UniqueName: \"kubernetes.io/projected/d78b9892-02be-43e9-b63e-acefa1fff4b3-kube-api-access-7pvwm\") pod \"packageserver-d55dfcdfc-tj2jk\" (UID: \"d78b9892-02be-43e9-b63e-acefa1fff4b3\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.018557 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.018739 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/01b0008e-1763-406f-b4c4-0ba777086a02-node-bootstrap-token\") pod \"machine-config-server-mlf5b\" (UID: \"01b0008e-1763-406f-b4c4-0ba777086a02\") " pod="openshift-machine-config-operator/machine-config-server-mlf5b" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.018781 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45bcacf1-3275-4634-b7cd-909f2a77bc0e-config-volume\") pod \"dns-default-sdbr8\" (UID: \"45bcacf1-3275-4634-b7cd-909f2a77bc0e\") " pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.018818 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-plugins-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: E1125 08:51:27.018913 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:27.518875435 +0000 UTC m=+147.644904988 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.018971 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/45bcacf1-3275-4634-b7cd-909f2a77bc0e-metrics-tls\") pod \"dns-default-sdbr8\" (UID: \"45bcacf1-3275-4634-b7cd-909f2a77bc0e\") " pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.019023 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-csi-data-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.019046 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-plugins-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.020325 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjbs7\" (UniqueName: \"kubernetes.io/projected/01b0008e-1763-406f-b4c4-0ba777086a02-kube-api-access-xjbs7\") pod \"machine-config-server-mlf5b\" (UID: \"01b0008e-1763-406f-b4c4-0ba777086a02\") " pod="openshift-machine-config-operator/machine-config-server-mlf5b" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.020388 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-mountpoint-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.020406 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/01b0008e-1763-406f-b4c4-0ba777086a02-certs\") pod \"machine-config-server-mlf5b\" (UID: \"01b0008e-1763-406f-b4c4-0ba777086a02\") " pod="openshift-machine-config-operator/machine-config-server-mlf5b" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.020464 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwz2r\" (UniqueName: \"kubernetes.io/projected/45bcacf1-3275-4634-b7cd-909f2a77bc0e-kube-api-access-lwz2r\") pod \"dns-default-sdbr8\" (UID: \"45bcacf1-3275-4634-b7cd-909f2a77bc0e\") " pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.020485 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-registration-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " 
pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.020503 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-socket-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.020519 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtbm7\" (UniqueName: \"kubernetes.io/projected/52c22cc7-413e-4e95-9796-59b6d6908bbf-kube-api-access-rtbm7\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.021094 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-mountpoint-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.021644 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/45bcacf1-3275-4634-b7cd-909f2a77bc0e-config-volume\") pod \"dns-default-sdbr8\" (UID: \"45bcacf1-3275-4634-b7cd-909f2a77bc0e\") " pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.026725 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/01b0008e-1763-406f-b4c4-0ba777086a02-node-bootstrap-token\") pod \"machine-config-server-mlf5b\" (UID: \"01b0008e-1763-406f-b4c4-0ba777086a02\") " pod="openshift-machine-config-operator/machine-config-server-mlf5b" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.026890 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-registration-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.027213 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/45bcacf1-3275-4634-b7cd-909f2a77bc0e-metrics-tls\") pod \"dns-default-sdbr8\" (UID: \"45bcacf1-3275-4634-b7cd-909f2a77bc0e\") " pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.027280 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-csi-data-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.027402 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/52c22cc7-413e-4e95-9796-59b6d6908bbf-socket-dir\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 
08:51:27.028746 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/01b0008e-1763-406f-b4c4-0ba777086a02-certs\") pod \"machine-config-server-mlf5b\" (UID: \"01b0008e-1763-406f-b4c4-0ba777086a02\") " pod="openshift-machine-config-operator/machine-config-server-mlf5b" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.035595 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a3ec77ce-9c00-4f38-b4e3-1c3a715a321e-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-cf4q4\" (UID: \"a3ec77ce-9c00-4f38-b4e3-1c3a715a321e\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.036826 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-qtltp"] Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.038010 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq"] Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.053906 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-bound-sa-token\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.075084 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj"] Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.080047 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czn85\" (UniqueName: \"kubernetes.io/projected/61828b1b-84ea-4648-ad3c-ab4c3c592743-kube-api-access-czn85\") pod \"router-default-5444994796-bmncc\" (UID: \"61828b1b-84ea-4648-ad3c-ab4c3c592743\") " pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.115457 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.118440 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs"] Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.118639 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.121727 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.121889 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr"] Nov 25 08:51:27 crc kubenswrapper[4932]: E1125 08:51:27.121988 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:27.621976271 +0000 UTC m=+147.748005834 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.122717 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfkcd\" (UniqueName: \"kubernetes.io/projected/f72fef44-52e3-46ce-b5c7-1e7d32b6c50b-kube-api-access-zfkcd\") pod \"kube-storage-version-migrator-operator-b67b599dd-mtws7\" (UID: \"f72fef44-52e3-46ce-b5c7-1e7d32b6c50b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.124286 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-2q4hk"] Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.129886 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4"] Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.131321 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.138740 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-jdjgs"] Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.142583 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8lfz\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-kube-api-access-t8lfz\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.151954 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzcjb\" (UniqueName: \"kubernetes.io/projected/3776cbbe-bbc8-430b-8db5-881918c75fb2-kube-api-access-rzcjb\") pod \"marketplace-operator-79b997595-ktv2t\" (UID: \"3776cbbe-bbc8-430b-8db5-881918c75fb2\") " pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.160669 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-sbxc8" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.169831 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tmn5\" (UniqueName: \"kubernetes.io/projected/54acd6a4-8682-4697-9985-c5a0132c9307-kube-api-access-4tmn5\") pod \"ingress-canary-zvnwp\" (UID: \"54acd6a4-8682-4697-9985-c5a0132c9307\") " pod="openshift-ingress-canary/ingress-canary-zvnwp" Nov 25 08:51:27 crc kubenswrapper[4932]: W1125 08:51:27.172969 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod333e3fc9_dd6b_4295_9555_1f8c66440d44.slice/crio-bef28fd743bc8908bfd8d0e5a28e31a37f9d02945c4c8fa9d335d4796685524a WatchSource:0}: Error finding container bef28fd743bc8908bfd8d0e5a28e31a37f9d02945c4c8fa9d335d4796685524a: Status 404 returned error can't find the container with id bef28fd743bc8908bfd8d0e5a28e31a37f9d02945c4c8fa9d335d4796685524a Nov 25 08:51:27 crc kubenswrapper[4932]: W1125 08:51:27.182068 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7710aca4_ceb7_4162_90b5_f8adc32e49bf.slice/crio-bc25cdae817467155a8dab59290054a9e091ce37cf1e4aa9ca433b82207dcf72 WatchSource:0}: Error finding container bc25cdae817467155a8dab59290054a9e091ce37cf1e4aa9ca433b82207dcf72: Status 404 returned error can't find the container with id bc25cdae817467155a8dab59290054a9e091ce37cf1e4aa9ca433b82207dcf72 Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.188810 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.194700 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5x72s\" (UniqueName: \"kubernetes.io/projected/c88f270c-7870-450c-b838-215ad7b41078-kube-api-access-5x72s\") pod \"migrator-59844c95c7-k6wpr\" (UID: \"c88f270c-7870-450c-b838-215ad7b41078\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k6wpr" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.196872 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" Nov 25 08:51:27 crc kubenswrapper[4932]: W1125 08:51:27.207128 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37c802fe_446b_4a2f_a17d_6db1eafb0318.slice/crio-5bf7107e75caa9d0de38a13fd3cf89be7e955f7ca1f0eee7241d9cfb79c8f4de WatchSource:0}: Error finding container 5bf7107e75caa9d0de38a13fd3cf89be7e955f7ca1f0eee7241d9cfb79c8f4de: Status 404 returned error can't find the container with id 5bf7107e75caa9d0de38a13fd3cf89be7e955f7ca1f0eee7241d9cfb79c8f4de Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.217613 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtbm7\" (UniqueName: \"kubernetes.io/projected/52c22cc7-413e-4e95-9796-59b6d6908bbf-kube-api-access-rtbm7\") pod \"csi-hostpathplugin-cxlvv\" (UID: \"52c22cc7-413e-4e95-9796-59b6d6908bbf\") " pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.218175 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.222608 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:27 crc kubenswrapper[4932]: E1125 08:51:27.222985 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:27.722969662 +0000 UTC m=+147.848999225 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.227077 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.235637 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwz2r\" (UniqueName: \"kubernetes.io/projected/45bcacf1-3275-4634-b7cd-909f2a77bc0e-kube-api-access-lwz2r\") pod \"dns-default-sdbr8\" (UID: \"45bcacf1-3275-4634-b7cd-909f2a77bc0e\") " pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.242551 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-zvnwp" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.259899 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjbs7\" (UniqueName: \"kubernetes.io/projected/01b0008e-1763-406f-b4c4-0ba777086a02-kube-api-access-xjbs7\") pod \"machine-config-server-mlf5b\" (UID: \"01b0008e-1763-406f-b4c4-0ba777086a02\") " pod="openshift-machine-config-operator/machine-config-server-mlf5b" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.261765 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.268427 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.269390 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-mlf5b" Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.314683 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-t6qks"] Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.319895 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j"] Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.323520 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:27 crc kubenswrapper[4932]: E1125 08:51:27.323730 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:27.823720133 +0000 UTC m=+147.949749696 (durationBeforeRetry 500ms). 
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.391843 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-c2l58"]
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.409820 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-ksq5j"]
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.432915 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:27 crc kubenswrapper[4932]: E1125 08:51:27.433075 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:27.933047768 +0000 UTC m=+148.059077331 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.433596 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:27 crc kubenswrapper[4932]: E1125 08:51:27.433964 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:27.933949024 +0000 UTC m=+148.059978587 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.435057 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-qtltp" event={"ID":"1688eab6-98cb-4e8e-97c5-f14a2fa0db76","Type":"ContainerStarted","Data":"a5adceb9bad15f3650cbdcdd192a95b5f3b0ab72acf9e06998981f67426e2e29"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.467745 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" event={"ID":"ca7ad64f-5d34-4269-9faf-46bc2e3cab93","Type":"ContainerStarted","Data":"4e5c053228d577b4ffb068f7252c437839d4d2611985d3437e8cb2f030c9def9"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.468759 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" event={"ID":"0ccea157-c5a1-4e31-958e-095aa3b77b80","Type":"ContainerStarted","Data":"aa0b925afca03f52f50257251a4ed16b22572a9735fdb1cabe8f75df634eb307"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.475435 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs" event={"ID":"333e3fc9-dd6b-4295-9555-1f8c66440d44","Type":"ContainerStarted","Data":"bef28fd743bc8908bfd8d0e5a28e31a37f9d02945c4c8fa9d335d4796685524a"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.476216 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k6wpr"
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.479135 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" event={"ID":"f7f76ccd-1388-46e1-b71c-0b4352d86eaf","Type":"ContainerStarted","Data":"1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.479174 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" event={"ID":"f7f76ccd-1388-46e1-b71c-0b4352d86eaf","Type":"ContainerStarted","Data":"74c65febdda5505ab33581ad1aa2f4b6ce7c459d2b50c6ef87ccb9bb1423145c"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.479593 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp"
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.484979 4932 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-v2fzp container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.485024 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" podUID="f7f76ccd-1388-46e1-b71c-0b4352d86eaf" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused"
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.491946 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" event={"ID":"868f657f-d9b7-43c8-a706-a7657f16ce42","Type":"ContainerStarted","Data":"1016f0ec3a5ed42606917994b0cb14de145f7826f7a58eaa6f76e35d0c51e19d"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.491999 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" event={"ID":"868f657f-d9b7-43c8-a706-a7657f16ce42","Type":"ContainerStarted","Data":"a0f4a039ca76aad490ec478db99c1fdf622ac26dbcbb68355a604771b0d1fa87"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.495843 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-zffp5" event={"ID":"157b16a5-6638-4f93-b6ae-616cadd9eb21","Type":"ContainerStarted","Data":"f3e9fb0a037a43f067f1cd9906942aef2b90e90b81d1f5e4e9f5e38fccb9fb6e"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.499221 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j" event={"ID":"ba443197-0445-4bc8-915a-d4d6f49bdea7","Type":"ContainerStarted","Data":"358f606838b74edb70b0fbe16c04d06264b817bb781583b87afa5b0d9e15c74e"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.502399 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" event={"ID":"9945a2a9-7f64-4d7c-bab3-aca70803734d","Type":"ContainerStarted","Data":"06ae07009a468293a6c640558f3138f2c3c2a9b44ff8922920f8e038e6566242"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.503887 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h" event={"ID":"a2c26b18-e2d1-44ff-91bf-44e7a6d1a097","Type":"ContainerStarted","Data":"971f3749b87730ffa36ab24e9ee76f66aa0ae7ad8cb51bdf7276edcabb27d9fb"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.505131 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5kf8q" event={"ID":"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49","Type":"ContainerStarted","Data":"9b9dfe4e22f03da03d3ebc25365c15fcc7aeb19d49453a1e1a09d619da00f71b"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.506434 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb" event={"ID":"494bd606-c814-41b7-8c9a-f89487408a08","Type":"ContainerStarted","Data":"9919cb37f94d9072cfcee14bd4ebb1db201d83b3878e03b96aea26e45f9973f3"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.506458 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb" event={"ID":"494bd606-c814-41b7-8c9a-f89487408a08","Type":"ContainerStarted","Data":"bacfe4727bff3e6cb3631c44dfeaaeb8678a00b724d01db60d1d3df79132a182"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.508506 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4" event={"ID":"7710aca4-ceb7-4162-90b5-f8adc32e49bf","Type":"ContainerStarted","Data":"bc25cdae817467155a8dab59290054a9e091ce37cf1e4aa9ca433b82207dcf72"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.511735 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" event={"ID":"37c802fe-446b-4a2f-a17d-6db1eafb0318","Type":"ContainerStarted","Data":"5bf7107e75caa9d0de38a13fd3cf89be7e955f7ca1f0eee7241d9cfb79c8f4de"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.525817 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" event={"ID":"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6","Type":"ContainerStarted","Data":"297c76ac2aba9894a50f2f2f257a65de7b59aac7413420e8650d448c548172c5"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.526581 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" event={"ID":"91fee7b5-f700-4555-9ced-964fa79ba338","Type":"ContainerStarted","Data":"f9a21b961924549651a6ec88342acfa651809a0de90f72cc5b09f212aab5cc61"}
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.533986 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h"
Nov 25 08:51:27 crc kubenswrapper[4932]: E1125 08:51:27.535403 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:28.035386993 +0000 UTC m=+148.161416556 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.535428 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.535686 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:27 crc kubenswrapper[4932]: E1125 08:51:27.536016 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:28.036009738 +0000 UTC m=+148.162039301 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.613204 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks"
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.638505 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:27 crc kubenswrapper[4932]: E1125 08:51:27.640030 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:28.13999977 +0000 UTC m=+148.266029353 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
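[Annotation] The readiness probe failures recorded above (route-controller-manager's /healthz refusing connections right after ContainerStarted) are the expected window between the container starting and its server binding the port; the same pattern repeats below for the console downloads pod on http://10.217.0.9:8080/. A rough Go sketch of what such an HTTP readiness check amounts to; the 1s timeout and skipped certificate verification are assumptions for illustration, not values read from the pod spec.

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// probe issues one GET against a health endpoint. A TCP "connection
// refused" (the server is not listening yet) surfaces as err and counts
// as a failed probe, matching the output captured in the log lines above.
func probe(url string) error {
	client := &http.Client{
		Timeout: 1 * time.Second,
		Transport: &http.Transport{
			// HTTPS probes typically do not verify the serving cert.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("probe failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("probe failed: status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	fmt.Println(probe("https://10.217.0.8:8443/healthz"))
}
```

By 08:51:29 the log shows the same pod's probe flip to status="ready", so this is transient startup noise rather than a persistent fault.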
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.746082 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-42gdf"]
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.759774 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:27 crc kubenswrapper[4932]: E1125 08:51:27.761246 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:28.261234881 +0000 UTC m=+148.387264454 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.768369 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft"]
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.807383 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94"]
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.819903 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs"]
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.843595 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-t9z5r"]
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.862781 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:27 crc kubenswrapper[4932]: E1125 08:51:27.863306 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:28.363273584 +0000 UTC m=+148.489303147 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.865720 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb"]
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.906333 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4"]
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.906370 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7"]
Nov 25 08:51:27 crc kubenswrapper[4932]: I1125 08:51:27.963940 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:27 crc kubenswrapper[4932]: E1125 08:51:27.964233 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:28.464222824 +0000 UTC m=+148.590252387 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.021965 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl"]
Nov 25 08:51:28 crc kubenswrapper[4932]: W1125 08:51:28.043749 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb97ce800_9e29_48e1_8047_83e363a75a16.slice/crio-f8ad276e4ffa50e53a4c05b3d639229e69ba36d4b7dfebd89941d9634889720f WatchSource:0}: Error finding container f8ad276e4ffa50e53a4c05b3d639229e69ba36d4b7dfebd89941d9634889720f: Status 404 returned error can't find the container with id f8ad276e4ffa50e53a4c05b3d639229e69ba36d4b7dfebd89941d9634889720f
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.069929 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:28 crc kubenswrapper[4932]: E1125 08:51:28.070314 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:28.570299418 +0000 UTC m=+148.696328981 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:28 crc kubenswrapper[4932]: W1125 08:51:28.090860 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb500c768_0c0e_4a28_8809_5e181e03bc5c.slice/crio-bee02ed8871a362283aecbd11f5f71fb93f8fbc8ab07e2eeac244006d439918c WatchSource:0}: Error finding container bee02ed8871a362283aecbd11f5f71fb93f8fbc8ab07e2eeac244006d439918c: Status 404 returned error can't find the container with id bee02ed8871a362283aecbd11f5f71fb93f8fbc8ab07e2eeac244006d439918c
Nov 25 08:51:28 crc kubenswrapper[4932]: W1125 08:51:28.101741 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01b0008e_1763_406f_b4c4_0ba777086a02.slice/crio-f6d49881b28991ebc52e08e0ac3c1bcb82a316ea2e35d8d5f2d34b4abb739792 WatchSource:0}: Error finding container f6d49881b28991ebc52e08e0ac3c1bcb82a316ea2e35d8d5f2d34b4abb739792: Status 404 returned error can't find the container with id f6d49881b28991ebc52e08e0ac3c1bcb82a316ea2e35d8d5f2d34b4abb739792
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.171425 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:28 crc kubenswrapper[4932]: E1125 08:51:28.171752 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:28.671741668 +0000 UTC m=+148.797771231 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.272570 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:28 crc kubenswrapper[4932]: E1125 08:51:28.273292 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:28.77327605 +0000 UTC m=+148.899305613 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.381132 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:28 crc kubenswrapper[4932]: E1125 08:51:28.381458 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:28.881446408 +0000 UTC m=+149.007475971 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.422864 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" podStartSLOduration=126.422847485 podStartE2EDuration="2m6.422847485s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:28.422122576 +0000 UTC m=+148.548152149" watchObservedRunningTime="2025-11-25 08:51:28.422847485 +0000 UTC m=+148.548877048"
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.431363 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-sbxc8"]
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.483084 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:28 crc kubenswrapper[4932]: E1125 08:51:28.483522 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:28.983506372 +0000 UTC m=+149.109535935 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
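[Annotation] Each failed attempt above is followed by nestedpendingoperations.go refusing a new operation on the same volume until a deadline ("No retries permitted until ..."), which keeps attempts spaced at the reported 500ms backoff instead of spinning. A small Go sketch of such a per-volume gate, under the simplifying assumption of a fixed backoff (the real kubelet code also grows the backoff on repeated failures; opGate and its methods are invented names):

```go
package main

import (
	"fmt"
	"time"
)

// opGate tracks, per volume, the earliest time a new mount/unmount
// operation may start, mimicking "No retries permitted until <t>".
type opGate struct {
	notBefore map[string]time.Time
	backoff   time.Duration
}

func newOpGate(backoff time.Duration) *opGate {
	return &opGate{notBefore: map[string]time.Time{}, backoff: backoff}
}

// tryStart rejects an operation while the volume is inside its backoff
// window; callers record a fresh window via fail() after each failure.
func (g *opGate) tryStart(volume string) error {
	if t, ok := g.notBefore[volume]; ok && time.Now().Before(t) {
		return fmt.Errorf("no retries permitted until %s", t.Format(time.RFC3339Nano))
	}
	return nil
}

func (g *opGate) fail(volume string) {
	g.notBefore[volume] = time.Now().Add(g.backoff)
}

func main() {
	g := newOpGate(500 * time.Millisecond)
	vol := "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
	fmt.Println(g.tryStart(vol)) // <nil>: first attempt allowed
	g.fail(vol)                  // attempt failed; open a 500ms window
	fmt.Println(g.tryStart(vol)) // rejected until the window expires
}
```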
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.585784 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.586608 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft" event={"ID":"5d011f07-46e6-4102-bec8-022da84881ac","Type":"ContainerStarted","Data":"2cd45ca6633a0b72127b10b16d256216e1f9ea9b0af1c1baededca9fbc155c25"}
Nov 25 08:51:28 crc kubenswrapper[4932]: E1125 08:51:28.586989 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.086960862 +0000 UTC m=+149.212990425 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.619703 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97"]
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.619841 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94" event={"ID":"1446a2a9-8c10-4801-a3ce-2e08d66c81b2","Type":"ContainerStarted","Data":"7c867a8bf013f67f34c5ebc7ffaf8ab94491c593a76aaa4175543d33c3f74ae0"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.620613 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" event={"ID":"21fffc77-e724-4f48-ac20-f21104224241","Type":"ContainerStarted","Data":"3c6b19f86770ffbae1cfee8c26285e3e2087928a80f70f4d681c7781ee826fb1"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.621283 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-t6qks" event={"ID":"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e","Type":"ContainerStarted","Data":"965c128bb433e7b89681b5f13549f06c82324d719e874ee2f4b03d31c7768b2d"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.622570 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-qtltp" event={"ID":"1688eab6-98cb-4e8e-97c5-f14a2fa0db76","Type":"ContainerStarted","Data":"085bc38b1544f786662b2b7c6488179fd1e0f43a25dc04839932d998afcd2e8a"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.623417 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-qtltp"
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.625592 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" event={"ID":"b97ce800-9e29-48e1-8047-83e363a75a16","Type":"ContainerStarted","Data":"f8ad276e4ffa50e53a4c05b3d639229e69ba36d4b7dfebd89941d9634889720f"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.627129 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" event={"ID":"0ccea157-c5a1-4e31-958e-095aa3b77b80","Type":"ContainerStarted","Data":"98cf116121ba135869681ebc6bfec6fd018576ead7098488ac8760cefee1ac43"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.628146 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j" event={"ID":"a30b3fac-e050-452c-8806-2120c8a6fe6b","Type":"ContainerStarted","Data":"a6c75fd79d0e006c45b9cd557c9c409c2017e023f8bda61233bf965d405bf8b4"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.629620 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" event={"ID":"f72fef44-52e3-46ce-b5c7-1e7d32b6c50b","Type":"ContainerStarted","Data":"5fa98a3423d17708e29ada5c5104a1596b03f046da84f7ad296ed86679855e92"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.630560 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl" event={"ID":"18c360f0-89d1-46d9-ad99-40cc04b88546","Type":"ContainerStarted","Data":"0608aa9a9c895df30a3aa4b0114b4e8c5ba30ecf8ccb75eb6297ad04e0826bf0"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.646301 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4" event={"ID":"7710aca4-ceb7-4162-90b5-f8adc32e49bf","Type":"ContainerStarted","Data":"e8121b872e55e194dbd638f968bb1653a9d06f8de4f13c31fbf3ed0c40577b71"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.657770 4932 patch_prober.go:28] interesting pod/downloads-7954f5f757-qtltp container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" start-of-body=
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.657844 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-qtltp" podUID="1688eab6-98cb-4e8e-97c5-f14a2fa0db76" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused"
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.683026 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5kf8q" event={"ID":"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49","Type":"ContainerStarted","Data":"db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.687168 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:28 crc kubenswrapper[4932]: E1125 08:51:28.687490 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.187474724 +0000 UTC m=+149.313504287 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.690637 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" event={"ID":"a3ec77ce-9c00-4f38-b4e3-1c3a715a321e","Type":"ContainerStarted","Data":"89ddf350fe9a988e14c406623aec63fc70e027d03d2d8fc532a7448f33ab495b"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.691622 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r" event={"ID":"b500c768-0c0e-4a28-8809-5e181e03bc5c","Type":"ContainerStarted","Data":"bee02ed8871a362283aecbd11f5f71fb93f8fbc8ab07e2eeac244006d439918c"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.692401 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-42gdf" event={"ID":"62c5310d-d281-4096-b5ae-fbdd368daa44","Type":"ContainerStarted","Data":"15178b1d61c37cbca69dac4c9b769e7b241ff429858b401a1957e735ff18effd"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.718552 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" event={"ID":"868f657f-d9b7-43c8-a706-a7657f16ce42","Type":"ContainerStarted","Data":"91970f59bdc771b8fe6785f5a8f9ef56732f79aea3175174ae75ed7a53b61f3b"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.740616 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-bmncc" event={"ID":"61828b1b-84ea-4648-ad3c-ab4c3c592743","Type":"ContainerStarted","Data":"d666dde036a315f7d699bf38ac598d098cad241bec7b450f12e082a718ed4e35"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.789950 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:28 crc kubenswrapper[4932]: E1125 08:51:28.792466 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.292454105 +0000 UTC m=+149.418483668 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.802792 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-txqrb" podStartSLOduration=126.802775198 podStartE2EDuration="2m6.802775198s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:28.776288888 +0000 UTC m=+148.902318471" watchObservedRunningTime="2025-11-25 08:51:28.802775198 +0000 UTC m=+148.928804761"
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.839555 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" podStartSLOduration=126.839541809 podStartE2EDuration="2m6.839541809s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:28.836030648 +0000 UTC m=+148.962060201" watchObservedRunningTime="2025-11-25 08:51:28.839541809 +0000 UTC m=+148.965571372"
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.842300 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-cxlvv"]
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.868480 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-sdbr8"]
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.898787 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:28 crc kubenswrapper[4932]: E1125 08:51:28.900293 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.400260369 +0000 UTC m=+149.526289932 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.918065 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-zvnwp"]
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.938942 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb" event={"ID":"831022da-9b12-412f-b477-9c592428fe60","Type":"ContainerStarted","Data":"0e20e9f6eb5c27e433deb605b2da2aa953e30fde5c0c76169f9781c186eca8c6"}
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.949025 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk"]
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.974329 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-qtltp" podStartSLOduration=126.974312442 podStartE2EDuration="2m6.974312442s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:28.91327761 +0000 UTC m=+149.039307183" watchObservedRunningTime="2025-11-25 08:51:28.974312442 +0000 UTC m=+149.100341995"
Nov 25 08:51:28 crc kubenswrapper[4932]: I1125 08:51:28.976606 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-5lgl4" podStartSLOduration=126.976597733 podStartE2EDuration="2m6.976597733s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:28.954125154 +0000 UTC m=+149.080154717" watchObservedRunningTime="2025-11-25 08:51:28.976597733 +0000 UTC m=+149.102627296"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.004325 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:29 crc kubenswrapper[4932]: E1125 08:51:29.004706 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.504695418 +0000 UTC m=+149.630724981 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
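[Annotation] The pod_startup_latency_tracker.go entries interleaved here compute podStartSLOduration as the time from podCreationTimestamp to when the pod was observed running; the pull timestamps read 0001-01-01 because no image pull was required. For route-controller-manager above: created 08:49:22, observed running 08:51:28.422, roughly 126.42s ("2m6.42s"). A worked check of that arithmetic in Go (parse errors ignored since the inputs are fixed literals; the tracker's own value, 126.422847485, differs in the last decimals only because it samples its own clock):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	// Timestamps copied from the podStartSLOduration entry above.
	created, _ := time.Parse(layout, "2025-11-25 08:49:22 +0000 UTC")
	running, _ := time.Parse(layout, "2025-11-25 08:51:28.422122576 +0000 UTC")
	d := running.Sub(created)
	fmt.Println(d.Seconds()) // ~126.42, matching podStartSLOduration
	fmt.Println(d)           // 2m6.422122576s, matching podStartE2EDuration
}
```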
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.010639 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz"]
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.010685 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h" event={"ID":"a2c26b18-e2d1-44ff-91bf-44e7a6d1a097","Type":"ContainerStarted","Data":"69533bb2e7eeb7a1256e49e24988218071a80bb015428f94531dd573580cbaac"}
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.010715 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ktv2t"]
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.018894 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-5kf8q" podStartSLOduration=127.018872345 podStartE2EDuration="2m7.018872345s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:29.008524441 +0000 UTC m=+149.134554004" watchObservedRunningTime="2025-11-25 08:51:29.018872345 +0000 UTC m=+149.144901908"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.030144 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" event={"ID":"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6","Type":"ContainerStarted","Data":"612a72f94cf04cf65504cabafc6278bb19441f275e4f18d3674e2570cd9ec101"}
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.035526 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-2x94m" podStartSLOduration=127.035448768 podStartE2EDuration="2m7.035448768s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:29.03425098 +0000 UTC m=+149.160280543" watchObservedRunningTime="2025-11-25 08:51:29.035448768 +0000 UTC m=+149.161478331"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.044562 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" event={"ID":"37c802fe-446b-4a2f-a17d-6db1eafb0318","Type":"ContainerStarted","Data":"98fa121d7ce3d463f6bdc28adc03f2d1f05184af80cdd31f28c40c7450ebedbe"}
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.052217 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-k6wpr"]
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.067519 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j" event={"ID":"ba443197-0445-4bc8-915a-d4d6f49bdea7","Type":"ContainerStarted","Data":"30e1e82abf5de63191ced097481812ee6b1550f39a11b787c7b5530779dae9b1"}
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.068149 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-jdjgs" podStartSLOduration=127.068130126 podStartE2EDuration="2m7.068130126s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:29.066563463 +0000 UTC m=+149.192593016" watchObservedRunningTime="2025-11-25 08:51:29.068130126 +0000 UTC m=+149.194159689"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.105251 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:29 crc kubenswrapper[4932]: E1125 08:51:29.105620 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.605604546 +0000 UTC m=+149.731634109 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.124561 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" event={"ID":"ca7ad64f-5d34-4269-9faf-46bc2e3cab93","Type":"ContainerStarted","Data":"68d7c8e43ca36fc11bb0e1fb85e0b806bafbc454fe81ee3a899a6d5d66068264"}
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.126558 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-kjf9j" podStartSLOduration=127.126544983 podStartE2EDuration="2m7.126544983s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:29.124638807 +0000 UTC m=+149.250668360" watchObservedRunningTime="2025-11-25 08:51:29.126544983 +0000 UTC m=+149.252574546"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.130698 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-mlf5b" event={"ID":"01b0008e-1763-406f-b4c4-0ba777086a02","Type":"ContainerStarted","Data":"f6d49881b28991ebc52e08e0ac3c1bcb82a316ea2e35d8d5f2d34b4abb739792"}
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.133778 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58" event={"ID":"aa23081e-8667-4a54-a39f-8a8073436dd9","Type":"ContainerStarted","Data":"697ab21a2cd9116426b3c50b2b4fc5a3a82f600ecc2626f03012e67f679bcc34"}
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.153767 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.210377 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" podStartSLOduration=128.210356157 podStartE2EDuration="2m8.210356157s" podCreationTimestamp="2025-11-25 08:49:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:29.151903438 +0000 UTC m=+149.277933001" watchObservedRunningTime="2025-11-25 08:51:29.210356157 +0000 UTC m=+149.336385720"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.212922 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:29 crc kubenswrapper[4932]: E1125 08:51:29.213986 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.713972542 +0000 UTC m=+149.840002105 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.313779 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:29 crc kubenswrapper[4932]: E1125 08:51:29.314210 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.814177672 +0000 UTC m=+149.940207245 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.417270 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:29 crc kubenswrapper[4932]: E1125 08:51:29.417853 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:29.91784055 +0000 UTC m=+150.043870113 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.518776 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.518926 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.518969 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.519007 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.519070 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:51:29 crc kubenswrapper[4932]: E1125 08:51:29.519581 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:30.01955368 +0000 UTC m=+150.145583253 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.523157 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.526653 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.527004 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.534839 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.626528 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:29 crc kubenswrapper[4932]: E1125 08:51:29.626850 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8
podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:30.126838693 +0000 UTC m=+150.252868256 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.727311 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:29 crc kubenswrapper[4932]: E1125 08:51:29.727673 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:30.227658237 +0000 UTC m=+150.353687800 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.733820 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.759073 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.766411 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.828449 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:29 crc kubenswrapper[4932]: E1125 08:51:29.828691 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:30.32868045 +0000 UTC m=+150.454710013 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.931379 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:29 crc kubenswrapper[4932]: E1125 08:51:29.932023 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:30.431995174 +0000 UTC m=+150.558024737 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:29 crc kubenswrapper[4932]: I1125 08:51:29.932306 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:29 crc kubenswrapper[4932]: E1125 08:51:29.932662 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:30.4326497 +0000 UTC m=+150.558679263 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.035347 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:30 crc kubenswrapper[4932]: E1125 08:51:30.037088 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:30.537070409 +0000 UTC m=+150.663099972 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.137410 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:30 crc kubenswrapper[4932]: E1125 08:51:30.137851 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:30.637835681 +0000 UTC m=+150.763865234 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.210597 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb" event={"ID":"831022da-9b12-412f-b477-9c592428fe60","Type":"ContainerStarted","Data":"bfd18224824ce380a2980c6c65ce7a6d1182635baaec0ea22ef7ead2c9b4cf6f"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.241258 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:30 crc kubenswrapper[4932]: E1125 08:51:30.241519 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:30.741503729 +0000 UTC m=+150.867533292 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.258124 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-sbxc8" event={"ID":"c235a252-2ba4-4b5f-b04a-ca53a476ebde","Type":"ContainerStarted","Data":"a7076d740de4ff1f7cd3c08a2ea76cd79779ac55bd7ce20127736f3d9d3d6b49"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.258447 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-sbxc8" event={"ID":"c235a252-2ba4-4b5f-b04a-ca53a476ebde","Type":"ContainerStarted","Data":"ac15c36059e2e278013cb75d4324cc0363ad01bcd210c8aceb2283fbaa58f845"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.282675 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j" event={"ID":"a30b3fac-e050-452c-8806-2120c8a6fe6b","Type":"ContainerStarted","Data":"5d86789be2b532baed612a213d1dedcfd6101a3e4d1421b752c9f719bbb5a713"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.302980 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" event={"ID":"d78b9892-02be-43e9-b63e-acefa1fff4b3","Type":"ContainerStarted","Data":"1d952a00478350d390827dc1ad30c10eecec7412835aac642a222c8a38ddebc4"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.311087 4932 generic.go:334] "Generic (PLEG): container finished" 
podID="9945a2a9-7f64-4d7c-bab3-aca70803734d" containerID="8388e9945840148ae2f8d5f7644a21ddcd981777ddca394c57db4c02c1aa62d7" exitCode=0 Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.311862 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" event={"ID":"9945a2a9-7f64-4d7c-bab3-aca70803734d","Type":"ContainerDied","Data":"8388e9945840148ae2f8d5f7644a21ddcd981777ddca394c57db4c02c1aa62d7"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.322651 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4dn4j" podStartSLOduration=128.322637966 podStartE2EDuration="2m8.322637966s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:30.320337374 +0000 UTC m=+150.446366937" watchObservedRunningTime="2025-11-25 08:51:30.322637966 +0000 UTC m=+150.448667529" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.334839 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-bmncc" event={"ID":"61828b1b-84ea-4648-ad3c-ab4c3c592743","Type":"ContainerStarted","Data":"7d90bdec0e07a2308fd13cb0e2359d96c9bab3ea1266d91db15acb52b8bb5326"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.350939 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:30 crc kubenswrapper[4932]: E1125 08:51:30.351324 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:30.851312973 +0000 UTC m=+150.977342536 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.373832 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-mlf5b" event={"ID":"01b0008e-1763-406f-b4c4-0ba777086a02","Type":"ContainerStarted","Data":"01fd6283d684c3bb1c37ec921dc3598a609c907c390ce0789fe601c1f3771122"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.388377 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft" event={"ID":"5d011f07-46e6-4102-bec8-022da84881ac","Type":"ContainerStarted","Data":"c36ef45e3cf49a3da4e60ef8dececc12bb4337bfa39914dc100f0561a5f290ed"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.404724 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl" event={"ID":"18c360f0-89d1-46d9-ad99-40cc04b88546","Type":"ContainerStarted","Data":"45a1b02b5546cdbcf49b6bccc89a2611448e4b4982f4a884cc6abb6b6bfbefe5"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.442537 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-bmncc" podStartSLOduration=128.442500782 podStartE2EDuration="2m8.442500782s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:30.441148318 +0000 UTC m=+150.567177881" watchObservedRunningTime="2025-11-25 08:51:30.442500782 +0000 UTC m=+150.568530345" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.453720 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:30 crc kubenswrapper[4932]: E1125 08:51:30.453827 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:30.953800704 +0000 UTC m=+151.079830267 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.453955 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:30 crc kubenswrapper[4932]: E1125 08:51:30.455353 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:30.955341106 +0000 UTC m=+151.081370669 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.467452 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-42gdf" event={"ID":"62c5310d-d281-4096-b5ae-fbdd368daa44","Type":"ContainerStarted","Data":"ee86707a5960cb7257ddd7a93b74d444a1183a835bb13cbd1eb40c0bb7e04067"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.468167 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-mlf5b" podStartSLOduration=6.468151709 podStartE2EDuration="6.468151709s" podCreationTimestamp="2025-11-25 08:51:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:30.467702761 +0000 UTC m=+150.593732324" watchObservedRunningTime="2025-11-25 08:51:30.468151709 +0000 UTC m=+150.594181272" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.514446 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-9lhfl" podStartSLOduration=128.514408599 podStartE2EDuration="2m8.514408599s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:30.500362157 +0000 UTC m=+150.626391710" watchObservedRunningTime="2025-11-25 08:51:30.514408599 +0000 UTC m=+150.640438162" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.535761 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-zffp5" 
event={"ID":"157b16a5-6638-4f93-b6ae-616cadd9eb21","Type":"ContainerStarted","Data":"3016d867ab7d5bc7f61171ad002d961d6d002b905d601afe8336108de6664e3c"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.551775 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs" event={"ID":"333e3fc9-dd6b-4295-9555-1f8c66440d44","Type":"ContainerStarted","Data":"5270ad2cdb5ee3b67336a8288a2395daafaefbbdc899dd762b0b9224ee496bf3"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.554809 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:30 crc kubenswrapper[4932]: E1125 08:51:30.555074 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:31.055048676 +0000 UTC m=+151.181078239 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.555143 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:30 crc kubenswrapper[4932]: E1125 08:51:30.555486 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:31.055478873 +0000 UTC m=+151.181508436 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.574930 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" event={"ID":"f72fef44-52e3-46ce-b5c7-1e7d32b6c50b","Type":"ContainerStarted","Data":"5ed22f739d9292a0e75341b670c63b2a4f211d7b7510e2c4f2f50d849c9d2e93"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.574998 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-zffp5" podStartSLOduration=128.574989884 podStartE2EDuration="2m8.574989884s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:30.57240934 +0000 UTC m=+150.698438923" watchObservedRunningTime="2025-11-25 08:51:30.574989884 +0000 UTC m=+150.701019447" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.588692 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" event={"ID":"3776cbbe-bbc8-430b-8db5-881918c75fb2","Type":"ContainerStarted","Data":"79c393c6ddb80ead372950870075f64d3a131a3f40d599c2b586042870812703"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.589778 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.591365 4932 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-ktv2t container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.591403 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" podUID="3776cbbe-bbc8-430b-8db5-881918c75fb2" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.612448 4932 generic.go:334] "Generic (PLEG): container finished" podID="ca7ad64f-5d34-4269-9faf-46bc2e3cab93" containerID="68d7c8e43ca36fc11bb0e1fb85e0b806bafbc454fe81ee3a899a6d5d66068264" exitCode=0 Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.618483 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cr7gs" podStartSLOduration=128.618467644 podStartE2EDuration="2m8.618467644s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:30.617246505 +0000 UTC m=+150.743276068" watchObservedRunningTime="2025-11-25 08:51:30.618467644 
+0000 UTC m=+150.744497197" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.656406 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:30 crc kubenswrapper[4932]: E1125 08:51:30.657538 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:31.157520196 +0000 UTC m=+151.283549759 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.668515 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" event={"ID":"ca7ad64f-5d34-4269-9faf-46bc2e3cab93","Type":"ContainerDied","Data":"68d7c8e43ca36fc11bb0e1fb85e0b806bafbc454fe81ee3a899a6d5d66068264"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.698496 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" event={"ID":"a3ec77ce-9c00-4f38-b4e3-1c3a715a321e","Type":"ContainerStarted","Data":"cc2fe62c1d0681334dfe6c663fd12dbd50a77e3b96315943c1b564428fda872a"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.710756 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" event={"ID":"91fee7b5-f700-4555-9ced-964fa79ba338","Type":"ContainerStarted","Data":"556950fe85d2b850f85424a2226e16a465b1bd54e737e03382efd51c73da67b1"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.727609 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" podStartSLOduration=128.72759203 podStartE2EDuration="2m8.72759203s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:30.667378881 +0000 UTC m=+150.793408444" watchObservedRunningTime="2025-11-25 08:51:30.72759203 +0000 UTC m=+150.853621593" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.759317 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:30 crc kubenswrapper[4932]: E1125 08:51:30.761381 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:31.261364992 +0000 UTC m=+151.387394555 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.766872 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-cf4q4" podStartSLOduration=128.766856231 podStartE2EDuration="2m8.766856231s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:30.765525198 +0000 UTC m=+150.891554761" watchObservedRunningTime="2025-11-25 08:51:30.766856231 +0000 UTC m=+150.892885784" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.767097 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-mtws7" podStartSLOduration=128.767091961 podStartE2EDuration="2m8.767091961s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:30.727889642 +0000 UTC m=+150.853919205" watchObservedRunningTime="2025-11-25 08:51:30.767091961 +0000 UTC m=+150.893121524" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.786798 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-zvnwp" event={"ID":"54acd6a4-8682-4697-9985-c5a0132c9307","Type":"ContainerStarted","Data":"5a67e38cca6adb2f520447324d2c6f31cdc3621a77af0eed45cb56bd1ffcc1b3"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.786838 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-zvnwp" event={"ID":"54acd6a4-8682-4697-9985-c5a0132c9307","Type":"ContainerStarted","Data":"ee9ee6dd8b6ade32feeda3744360526951ad2abdd6df201e7b6d7a2f92450df9"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.810067 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" event={"ID":"b97ce800-9e29-48e1-8047-83e363a75a16","Type":"ContainerStarted","Data":"1dc1f423a75768f61757d30cd8f9ed33924f4bdaf57fac1c6cf0dbaff221c7b2"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.811003 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.843472 4932 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-c49xs container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.843516 4932 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" podUID="b97ce800-9e29-48e1-8047-83e363a75a16" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.847694 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" event={"ID":"21fffc77-e724-4f48-ac20-f21104224241","Type":"ContainerStarted","Data":"ed412641e3d1177ff3c499dcd0153ab70574d8210910cb45c9dcdae44f69e33f"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.865752 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:30 crc kubenswrapper[4932]: E1125 08:51:30.867010 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:31.366990688 +0000 UTC m=+151.493020251 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.872664 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" event={"ID":"38dc74a2-971b-4e32-a1bd-d63805c021d5","Type":"ContainerStarted","Data":"6039ae9a6f9d3438cc4403599340b14e5dab6b83112e7068c33625ebdf74c37c"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.881843 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-sdbr8" event={"ID":"45bcacf1-3275-4634-b7cd-909f2a77bc0e","Type":"ContainerStarted","Data":"1fdb048ed03b86717c2fd750d5442488695c00d9d296d5c9eaad2b0257e4af1e"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.889474 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.898399 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" event={"ID":"e5307db8-5382-4953-bf7e-9b9cc2b0d4c6","Type":"ContainerStarted","Data":"47fb26c79f22ad173b3acf1f187cb53971f3461e2b6a08b2faa28ec9f8198b88"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.909018 4932 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-ksq5j container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.909084 4932 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" podUID="21fffc77-e724-4f48-ac20-f21104224241" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.916813 4932 generic.go:334] "Generic (PLEG): container finished" podID="aa23081e-8667-4a54-a39f-8a8073436dd9" containerID="1a125c6d519b6971cca20ac0a727cc3e0007b1439fcf76f9ba980ffdcb275345" exitCode=0 Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.916894 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58" event={"ID":"aa23081e-8667-4a54-a39f-8a8073436dd9","Type":"ContainerDied","Data":"1a125c6d519b6971cca20ac0a727cc3e0007b1439fcf76f9ba980ffdcb275345"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.924703 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-2q4hk" podStartSLOduration=128.924676347 podStartE2EDuration="2m8.924676347s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:30.812451456 +0000 UTC m=+150.938481019" watchObservedRunningTime="2025-11-25 08:51:30.924676347 +0000 UTC m=+151.050705910" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.929658 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-t6qks" event={"ID":"9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e","Type":"ContainerStarted","Data":"3e1de69cf96a74292b0959bee00a326ff400ff098247e05a43b1eab8eab21b8f"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.930127 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-t6qks" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.958363 4932 patch_prober.go:28] interesting pod/console-operator-58897d9998-t6qks container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.958420 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-t6qks" podUID="9c7a5d7e-f1e3-4a4e-ad46-b5f5908f628e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.973583 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:30 crc kubenswrapper[4932]: E1125 08:51:30.978084 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:31.478066913 +0000 UTC m=+151.604096476 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.984899 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k6wpr" event={"ID":"c88f270c-7870-450c-b838-215ad7b41078","Type":"ContainerStarted","Data":"692eedf358d4e335a00a6f54d3049a9cdc531a004a8b8a8c88fb4fe7686f0255"} Nov 25 08:51:30 crc kubenswrapper[4932]: I1125 08:51:30.984943 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k6wpr" event={"ID":"c88f270c-7870-450c-b838-215ad7b41078","Type":"ContainerStarted","Data":"9ed91b334ea61857f32831ada6111b8eb04b5b710eca169a0dace8b96e8be940"} Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.009979 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-zvnwp" podStartSLOduration=7.009962109 podStartE2EDuration="7.009962109s" podCreationTimestamp="2025-11-25 08:51:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:31.004938988 +0000 UTC m=+151.130968571" watchObservedRunningTime="2025-11-25 08:51:31.009962109 +0000 UTC m=+151.135991672" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.019922 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" event={"ID":"936fd4c5-860b-458e-a7cf-4feef1157c06","Type":"ContainerStarted","Data":"3249e2c9dea24df741379996b9dda8ddce35c9f70903836685b289170837fdd7"} Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.019978 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" event={"ID":"936fd4c5-860b-458e-a7cf-4feef1157c06","Type":"ContainerStarted","Data":"0c25caceee824ecfb78e8d2e7a728f197c561296d6acaac7e0db4c6cd0479fe7"} Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.021012 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.076943 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:31 crc kubenswrapper[4932]: E1125 08:51:31.077899 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:31.577881867 +0000 UTC m=+151.703911430 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.083172 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" event={"ID":"52c22cc7-413e-4e95-9796-59b6d6908bbf","Type":"ContainerStarted","Data":"824f8d584b55dc1428d1a458725f5b5435304420c5e01463be9589b7bfb83512"} Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.112823 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h" event={"ID":"a2c26b18-e2d1-44ff-91bf-44e7a6d1a097","Type":"ContainerStarted","Data":"22b456895b933f131ef59a08142b832ebe41b60d21f0e84d700cf61138cd8341"} Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.123640 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.129386 4932 patch_prober.go:28] interesting pod/router-default-5444994796-bmncc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 08:51:31 crc kubenswrapper[4932]: [-]has-synced failed: reason withheld Nov 25 08:51:31 crc kubenswrapper[4932]: [+]process-running ok Nov 25 08:51:31 crc kubenswrapper[4932]: healthz check failed Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.129440 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bmncc" podUID="61828b1b-84ea-4648-ad3c-ab4c3c592743" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.136520 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94" event={"ID":"1446a2a9-8c10-4801-a3ce-2e08d66c81b2","Type":"ContainerStarted","Data":"055e3d4b1e517f02d90b070298793174b506bda2c2e24613734c72ec28744802"} Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.184846 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" event={"ID":"0ccea157-c5a1-4e31-958e-095aa3b77b80","Type":"ContainerStarted","Data":"632915bf26a69ef718a5c1f5663de6a9c983311ca6bd4509a1b6fdcfb5efbc07"} Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.185946 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:31 crc kubenswrapper[4932]: E1125 08:51:31.187093 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 08:51:31.687082317 +0000 UTC m=+151.813111880 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.198993 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r" event={"ID":"b500c768-0c0e-4a28-8809-5e181e03bc5c","Type":"ContainerStarted","Data":"34ecbae8bc83b78729e653828b27206669d3f8c6d4eb84969a57adaa032ded91"} Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.202029 4932 patch_prober.go:28] interesting pod/downloads-7954f5f757-qtltp container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" start-of-body= Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.202067 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-qtltp" podUID="1688eab6-98cb-4e8e-97c5-f14a2fa0db76" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.287070 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:31 crc kubenswrapper[4932]: E1125 08:51:31.293682 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:31.793662292 +0000 UTC m=+151.919691845 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.388948 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:31 crc kubenswrapper[4932]: E1125 08:51:31.389258 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-25 08:51:31.889246967 +0000 UTC m=+152.015276530 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.477493 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" podStartSLOduration=129.477473617 podStartE2EDuration="2m9.477473617s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:31.474695436 +0000 UTC m=+151.600724999" watchObservedRunningTime="2025-11-25 08:51:31.477473617 +0000 UTC m=+151.603503180" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.491888 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:31 crc kubenswrapper[4932]: E1125 08:51:31.492194 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:31.992157635 +0000 UTC m=+152.118187198 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.551096 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k6wpr" podStartSLOduration=129.551080893 podStartE2EDuration="2m9.551080893s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:31.550390295 +0000 UTC m=+151.676419858" watchObservedRunningTime="2025-11-25 08:51:31.551080893 +0000 UTC m=+151.677110456" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.592886 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:31 crc kubenswrapper[4932]: E1125 08:51:31.593171 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.093159306 +0000 UTC m=+152.219188869 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.624308 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" podStartSLOduration=129.624292572 podStartE2EDuration="2m9.624292572s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:31.591435067 +0000 UTC m=+151.717464630" watchObservedRunningTime="2025-11-25 08:51:31.624292572 +0000 UTC m=+151.750322135" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.665487 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-t6qks" podStartSLOduration=129.66547217 podStartE2EDuration="2m9.66547217s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:31.664638047 +0000 UTC m=+151.790667610" watchObservedRunningTime="2025-11-25 08:51:31.66547217 +0000 UTC m=+151.791501733" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.693630 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:31 crc kubenswrapper[4932]: E1125 08:51:31.693880 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.193865736 +0000 UTC m=+152.319895299 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.693961 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:31 crc kubenswrapper[4932]: E1125 08:51:31.694205 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.194183449 +0000 UTC m=+152.320213012 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.788252 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" podStartSLOduration=129.788234822 podStartE2EDuration="2m9.788234822s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:31.787475202 +0000 UTC m=+151.913504765" watchObservedRunningTime="2025-11-25 08:51:31.788234822 +0000 UTC m=+151.914264385" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.788535 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-vmxsf" podStartSLOduration=130.788531584 podStartE2EDuration="2m10.788531584s" podCreationTimestamp="2025-11-25 08:49:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:31.741744812 +0000 UTC m=+151.867774375" watchObservedRunningTime="2025-11-25 08:51:31.788531584 +0000 UTC m=+151.914561147" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.795444 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:31 crc kubenswrapper[4932]: E1125 08:51:31.795599 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.295582246 +0000 UTC m=+152.421611809 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.795711 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:31 crc kubenswrapper[4932]: E1125 08:51:31.795951 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.295945201 +0000 UTC m=+152.421974764 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.842965 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-wdn94" podStartSLOduration=129.842948992 podStartE2EDuration="2m9.842948992s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:31.808735623 +0000 UTC m=+151.934765186" watchObservedRunningTime="2025-11-25 08:51:31.842948992 +0000 UTC m=+151.968978555" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.844531 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" podStartSLOduration=129.844525145 podStartE2EDuration="2m9.844525145s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:31.841959252 +0000 UTC m=+151.967988815" watchObservedRunningTime="2025-11-25 08:51:31.844525145 +0000 UTC m=+151.970554708" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.884764 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-t9z5r" podStartSLOduration=129.884746644 podStartE2EDuration="2m9.884746644s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-25 08:51:31.883658551 +0000 UTC m=+152.009688114" watchObservedRunningTime="2025-11-25 08:51:31.884746644 +0000 UTC m=+152.010776207" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.896991 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:31 crc kubenswrapper[4932]: E1125 08:51:31.897427 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.397409991 +0000 UTC m=+152.523439554 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.905991 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-wb24h" podStartSLOduration=129.905977044 podStartE2EDuration="2m9.905977044s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:31.904395771 +0000 UTC m=+152.030425334" watchObservedRunningTime="2025-11-25 08:51:31.905977044 +0000 UTC m=+152.032006607" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.927837 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-9w9dr" podStartSLOduration=129.927821958 podStartE2EDuration="2m9.927821958s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:31.925560658 +0000 UTC m=+152.051590221" watchObservedRunningTime="2025-11-25 08:51:31.927821958 +0000 UTC m=+152.053851521" Nov 25 08:51:31 crc kubenswrapper[4932]: I1125 08:51:31.998867 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:31 crc kubenswrapper[4932]: E1125 08:51:31.999346 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.499333699 +0000 UTC m=+152.625363262 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.100029 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:32 crc kubenswrapper[4932]: E1125 08:51:32.100261 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.600234416 +0000 UTC m=+152.726263979 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.100401 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:32 crc kubenswrapper[4932]: E1125 08:51:32.100697 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.600684694 +0000 UTC m=+152.726714257 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.122089 4932 patch_prober.go:28] interesting pod/router-default-5444994796-bmncc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 08:51:32 crc kubenswrapper[4932]: [-]has-synced failed: reason withheld Nov 25 08:51:32 crc kubenswrapper[4932]: [+]process-running ok Nov 25 08:51:32 crc kubenswrapper[4932]: healthz check failed Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.122141 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bmncc" podUID="61828b1b-84ea-4648-ad3c-ab4c3c592743" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.201699 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:32 crc kubenswrapper[4932]: E1125 08:51:32.201927 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.701889264 +0000 UTC m=+152.827918827 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.202183 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:32 crc kubenswrapper[4932]: E1125 08:51:32.202465 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.702456197 +0000 UTC m=+152.828485760 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.213825 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" event={"ID":"52c22cc7-413e-4e95-9796-59b6d6908bbf","Type":"ContainerStarted","Data":"9c50025960590c3f0032441714b56d78429924a010e866b7d5382dd64271e139"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.216146 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-42gdf" event={"ID":"62c5310d-d281-4096-b5ae-fbdd368daa44","Type":"ContainerStarted","Data":"d3e6906c3acc6688e3c3b400dc082afd95d1b3c9a7bc72333634bb7ef2d823e2"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.219965 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" event={"ID":"3776cbbe-bbc8-430b-8db5-881918c75fb2","Type":"ContainerStarted","Data":"3425b5f3e5ca8a75d9f08346140291dd8574ad31ccd41e487ceb940a9486a14e"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.220392 4932 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-ktv2t container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.220431 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" podUID="3776cbbe-bbc8-430b-8db5-881918c75fb2" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.224879 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-sdbr8" event={"ID":"45bcacf1-3275-4634-b7cd-909f2a77bc0e","Type":"ContainerStarted","Data":"0ea68f734d075d8e8e947ce63193f911bb0ac57b28117d32fb472730c8571467"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.224912 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-sdbr8" event={"ID":"45bcacf1-3275-4634-b7cd-909f2a77bc0e","Type":"ContainerStarted","Data":"a85f898c600e982545dd52aefced0028e424f0ee5721e72eba775b9f15ffad5d"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.225024 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.229121 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" event={"ID":"d78b9892-02be-43e9-b63e-acefa1fff4b3","Type":"ContainerStarted","Data":"a3fd66c21c90eb73b1eac50cdd0dd6d8541d061155e7210438316e0bcd77ed46"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.229328 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:32 crc 
kubenswrapper[4932]: I1125 08:51:32.230836 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft" event={"ID":"5d011f07-46e6-4102-bec8-022da84881ac","Type":"ContainerStarted","Data":"afe9a4e57ab497a4e595e988c64a1b3a6787db64e71d4865ef14d4da1d0f41f2"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.232868 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb" event={"ID":"831022da-9b12-412f-b477-9c592428fe60","Type":"ContainerStarted","Data":"5b6293f181ab610ee2d83ed8c8785a8cbbb2b6af71ad7e490a4ea0a42185e018"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.234697 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"051d07d595d6ec32a3911575f7da105a6188c2b6a4ea1346c683c8bdac0822cf"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.234726 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"513c0e1a4c3f55495f612f78d6b04bd3ad1587e3899ebdd96cb11f62cde73288"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.236402 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58" event={"ID":"aa23081e-8667-4a54-a39f-8a8073436dd9","Type":"ContainerStarted","Data":"6e0ed6043e484a801118a24caf00cc01d46d104ff4c3b216f79c4ec36863a33a"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.236837 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.238434 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" event={"ID":"38dc74a2-971b-4e32-a1bd-d63805c021d5","Type":"ContainerStarted","Data":"866cb4b9e9620bbdf8ebeca67f7d138aa6766b251f30339fc59694a58f19688c"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.238637 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.240037 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"73b15a8daca1a9741fe1c2985f86c42b534caaf30f9502fb9e1b6debd216ea4c"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.240066 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"e529d9bc48d65fd6527d4c6fa4e4ccd49b3168a2f76d2233f3a2f5aa76c62b18"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.240378 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.247163 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" 
event={"ID":"936fd4c5-860b-458e-a7cf-4feef1157c06","Type":"ContainerStarted","Data":"dc30f72ededbb0e0a3438ec8f7d7b4f011b356f29800e8969690d571c4f5f0b1"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.249987 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" event={"ID":"9945a2a9-7f64-4d7c-bab3-aca70803734d","Type":"ContainerStarted","Data":"c914243f42bf6d21108e75ee9a9933fe25f529ee52ab0edaeac06f93c8b7dfca"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.260393 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-k6wpr" event={"ID":"c88f270c-7870-450c-b838-215ad7b41078","Type":"ContainerStarted","Data":"fde0734802b0ea59f27c690243567e1ebc5e88b7f2d7d499e33153084e095897"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.262694 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-sbxc8" event={"ID":"c235a252-2ba4-4b5f-b04a-ca53a476ebde","Type":"ContainerStarted","Data":"a0162073c202c49b7bbed28b5f1766e9461a69f1283cb46abe9ec6a66e2e79e1"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.265830 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"3d57bc8ea33a3fe4e8abaac5973933db4b97d4fcafc74483dfd18c60c4faa749"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.265848 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"fdb9cbd95997bbd18a26234de2870c2c3369a217aada56d6745b04b72d88ad53"} Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.272592 4932 patch_prober.go:28] interesting pod/downloads-7954f5f757-qtltp container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" start-of-body= Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.272663 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-qtltp" podUID="1688eab6-98cb-4e8e-97c5-f14a2fa0db76" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.281796 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-c49xs" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.287399 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-42gdf" podStartSLOduration=130.287380505 podStartE2EDuration="2m10.287380505s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:32.253246499 +0000 UTC m=+152.379276052" watchObservedRunningTime="2025-11-25 08:51:32.287380505 +0000 UTC m=+152.413410068" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.303127 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:32 crc kubenswrapper[4932]: E1125 08:51:32.305021 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.80500689 +0000 UTC m=+152.931036453 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.314574 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-t6qks" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.314587 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" podStartSLOduration=130.314574613 podStartE2EDuration="2m10.314574613s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:32.289637365 +0000 UTC m=+152.415666928" watchObservedRunningTime="2025-11-25 08:51:32.314574613 +0000 UTC m=+152.440604176" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.323327 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-f8qtz" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.324718 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.336172 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-sdbr8" podStartSLOduration=8.336156117 podStartE2EDuration="8.336156117s" podCreationTimestamp="2025-11-25 08:51:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:32.314725669 +0000 UTC m=+152.440755232" watchObservedRunningTime="2025-11-25 08:51:32.336156117 +0000 UTC m=+152.462185680" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.337304 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" podStartSLOduration=130.337299273 podStartE2EDuration="2m10.337299273s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:32.335592924 +0000 UTC m=+152.461622487" watchObservedRunningTime="2025-11-25 08:51:32.337299273 +0000 UTC m=+152.463328836" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.398894 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58" podStartSLOduration=130.398877657 podStartE2EDuration="2m10.398877657s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:32.395638267 +0000 UTC m=+152.521667830" watchObservedRunningTime="2025-11-25 08:51:32.398877657 +0000 UTC m=+152.524907210" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.407417 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:32 crc kubenswrapper[4932]: E1125 08:51:32.413234 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:32.907760142 +0000 UTC m=+153.033789705 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.469662 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pbnhb" podStartSLOduration=130.469645668 podStartE2EDuration="2m10.469645668s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:32.468575886 +0000 UTC m=+152.594605449" watchObservedRunningTime="2025-11-25 08:51:32.469645668 +0000 UTC m=+152.595675231" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.507508 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-d8tft" podStartSLOduration=130.507492913 podStartE2EDuration="2m10.507492913s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:32.507119788 +0000 UTC m=+152.633149351" watchObservedRunningTime="2025-11-25 08:51:32.507492913 +0000 UTC m=+152.633522476" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.508700 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:32 crc kubenswrapper[4932]: E1125 08:51:32.508997 4932 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.008964552 +0000 UTC m=+153.134994115 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.611647 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:32 crc kubenswrapper[4932]: E1125 08:51:32.612283 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.112256925 +0000 UTC m=+153.238286488 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.713376 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:32 crc kubenswrapper[4932]: E1125 08:51:32.713687 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.213671863 +0000 UTC m=+153.339701426 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.739812 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-sbxc8" podStartSLOduration=130.739795129 podStartE2EDuration="2m10.739795129s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:32.688140932 +0000 UTC m=+152.814170495" watchObservedRunningTime="2025-11-25 08:51:32.739795129 +0000 UTC m=+152.865824692" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.753039 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.782416 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-tj2jk" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.814491 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-config-volume\") pod \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.814590 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-secret-volume\") pod \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.814644 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6p6wr\" (UniqueName: \"kubernetes.io/projected/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-kube-api-access-6p6wr\") pod \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\" (UID: \"ca7ad64f-5d34-4269-9faf-46bc2e3cab93\") " Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.814886 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:32 crc kubenswrapper[4932]: E1125 08:51:32.815137 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.315127513 +0000 UTC m=+153.441157076 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.815547 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-config-volume" (OuterVolumeSpecName: "config-volume") pod "ca7ad64f-5d34-4269-9faf-46bc2e3cab93" (UID: "ca7ad64f-5d34-4269-9faf-46bc2e3cab93"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.824426 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-kube-api-access-6p6wr" (OuterVolumeSpecName: "kube-api-access-6p6wr") pod "ca7ad64f-5d34-4269-9faf-46bc2e3cab93" (UID: "ca7ad64f-5d34-4269-9faf-46bc2e3cab93"). InnerVolumeSpecName "kube-api-access-6p6wr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.834486 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ca7ad64f-5d34-4269-9faf-46bc2e3cab93" (UID: "ca7ad64f-5d34-4269-9faf-46bc2e3cab93"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.915556 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.915877 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6p6wr\" (UniqueName: \"kubernetes.io/projected/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-kube-api-access-6p6wr\") on node \"crc\" DevicePath \"\"" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.915888 4932 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 08:51:32 crc kubenswrapper[4932]: I1125 08:51:32.915896 4932 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ca7ad64f-5d34-4269-9faf-46bc2e3cab93-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 08:51:32 crc kubenswrapper[4932]: E1125 08:51:32.915955 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.415940947 +0000 UTC m=+153.541970510 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.017612 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.018361 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.518349695 +0000 UTC m=+153.644379258 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.035295 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tjm2s"] Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.035483 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca7ad64f-5d34-4269-9faf-46bc2e3cab93" containerName="collect-profiles" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.035496 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca7ad64f-5d34-4269-9faf-46bc2e3cab93" containerName="collect-profiles" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.035592 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca7ad64f-5d34-4269-9faf-46bc2e3cab93" containerName="collect-profiles" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.036226 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.038155 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.050702 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tjm2s"] Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.087604 4932 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.118945 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.119115 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.619090316 +0000 UTC m=+153.745119879 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.119444 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4b4mx\" (UniqueName: \"kubernetes.io/projected/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-kube-api-access-4b4mx\") pod \"certified-operators-tjm2s\" (UID: \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\") " pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.119526 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-catalog-content\") pod \"certified-operators-tjm2s\" (UID: \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\") " pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.119627 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-utilities\") pod \"certified-operators-tjm2s\" (UID: \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\") " pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.119780 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: 
\"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.120139 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.620116237 +0000 UTC m=+153.746145800 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.122572 4932 patch_prober.go:28] interesting pod/router-default-5444994796-bmncc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 08:51:33 crc kubenswrapper[4932]: [-]has-synced failed: reason withheld Nov 25 08:51:33 crc kubenswrapper[4932]: [+]process-running ok Nov 25 08:51:33 crc kubenswrapper[4932]: healthz check failed Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.122782 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bmncc" podUID="61828b1b-84ea-4648-ad3c-ab4c3c592743" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.208357 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-drds8"] Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.209168 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-drds8" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.218482 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.220540 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.220672 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.72064694 +0000 UTC m=+153.846676493 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.220785 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4b4mx\" (UniqueName: \"kubernetes.io/projected/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-kube-api-access-4b4mx\") pod \"certified-operators-tjm2s\" (UID: \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\") " pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.220834 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-catalog-content\") pod \"certified-operators-tjm2s\" (UID: \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\") " pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.220873 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-utilities\") pod \"certified-operators-tjm2s\" (UID: \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\") " pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.220945 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.221294 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.721280446 +0000 UTC m=+153.847310019 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.221310 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-catalog-content\") pod \"certified-operators-tjm2s\" (UID: \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\") " pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.221369 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-utilities\") pod \"certified-operators-tjm2s\" (UID: \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\") " pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.225860 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-drds8"] Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.240614 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4b4mx\" (UniqueName: \"kubernetes.io/projected/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-kube-api-access-4b4mx\") pod \"certified-operators-tjm2s\" (UID: \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\") " pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.270248 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" event={"ID":"ca7ad64f-5d34-4269-9faf-46bc2e3cab93","Type":"ContainerDied","Data":"4e5c053228d577b4ffb068f7252c437839d4d2611985d3437e8cb2f030c9def9"} Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.270292 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e5c053228d577b4ffb068f7252c437839d4d2611985d3437e8cb2f030c9def9" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.270354 4932 util.go:48] "No ready sandbox for pod can be found. 
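The run of nestedpendingoperations errors above is the kubelet's operation-level retry gate: every mount/unmount of pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 fails while the kubevirt.io.hostpath-provisioner driver is unregistered, and each failure arms a backoff deadline (durationBeforeRetry, 500ms here) before the operation may run again. A minimal Go sketch of that gating pattern follows; the names (opBackoff, try) are illustrative assumptions, not kubelet internals.

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // opBackoff remembers when an operation last ran and how long to wait
    // before permitting another attempt (assumed type; not a kubelet type).
    type opBackoff struct {
    	lastAttempt time.Time
    	delay       time.Duration
    }

    var errTooSoon = errors.New("no retries permitted yet")

    // try runs op only once the backoff window has elapsed; on failure the
    // window starts at 500ms and doubles, mirroring durationBeforeRetry.
    func (b *opBackoff) try(op func() error) error {
    	if next := b.lastAttempt.Add(b.delay); time.Now().Before(next) {
    		return fmt.Errorf("%w: until %s", errTooSoon, next.Format(time.RFC3339Nano))
    	}
    	b.lastAttempt = time.Now()
    	if err := op(); err != nil {
    		if b.delay == 0 {
    			b.delay = 500 * time.Millisecond
    		} else {
    			b.delay *= 2
    		}
    		return err
    	}
    	b.delay = 0 // success resets the gate
    	return nil
    }

    func main() {
    	b := &opBackoff{}
    	mount := func() error {
    		return errors.New("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers")
    	}
    	for i := 0; i < 3; i++ {
    		fmt.Println(b.try(mount))
    		time.Sleep(600 * time.Millisecond)
    	}
    }

The reconciler keeps re-requesting the operation every reconcile pass, as the repeated entries in this log show, but the gate turns those requests into no-ops until each deadline passes.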
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.272460 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" event={"ID":"52c22cc7-413e-4e95-9796-59b6d6908bbf","Type":"ContainerStarted","Data":"c5598699c54ff5514a676ac942c1e5649fa753ad864148fa0d665013798f5c3a"} Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.272539 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" event={"ID":"52c22cc7-413e-4e95-9796-59b6d6908bbf","Type":"ContainerStarted","Data":"bee4c9719164b73bd12a003464bfeb59f3887b887fb835f2439224cd1fab0f1a"} Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.272554 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" event={"ID":"52c22cc7-413e-4e95-9796-59b6d6908bbf","Type":"ContainerStarted","Data":"5e80ff7d41c026fe9e4564fc4cde28a22de326ffa701d29d722b0b9a18300a6a"} Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.273145 4932 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-ktv2t container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.273215 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" podUID="3776cbbe-bbc8-430b-8db5-881918c75fb2" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.298490 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-cxlvv" podStartSLOduration=9.298468994 podStartE2EDuration="9.298468994s" podCreationTimestamp="2025-11-25 08:51:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:33.295387511 +0000 UTC m=+153.421417084" watchObservedRunningTime="2025-11-25 08:51:33.298468994 +0000 UTC m=+153.424498557" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.321662 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.321976 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.821954004 +0000 UTC m=+153.947983577 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.322454 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.322669 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c944be15-8b3b-417f-9640-2c926704f541-utilities\") pod \"community-operators-drds8\" (UID: \"c944be15-8b3b-417f-9640-2c926704f541\") " pod="openshift-marketplace/community-operators-drds8" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.323016 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c944be15-8b3b-417f-9640-2c926704f541-catalog-content\") pod \"community-operators-drds8\" (UID: \"c944be15-8b3b-417f-9640-2c926704f541\") " pod="openshift-marketplace/community-operators-drds8" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.323438 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rszq4\" (UniqueName: \"kubernetes.io/projected/c944be15-8b3b-417f-9640-2c926704f541-kube-api-access-rszq4\") pod \"community-operators-drds8\" (UID: \"c944be15-8b3b-417f-9640-2c926704f541\") " pod="openshift-marketplace/community-operators-drds8" Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.325289 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.825275107 +0000 UTC m=+153.951304670 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.355621 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.408609 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sfx28"] Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.418682 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sfx28" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.424350 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sfx28"] Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.424620 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.424897 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.924875042 +0000 UTC m=+154.050904595 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.425048 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.425155 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c944be15-8b3b-417f-9640-2c926704f541-utilities\") pod \"community-operators-drds8\" (UID: \"c944be15-8b3b-417f-9640-2c926704f541\") " pod="openshift-marketplace/community-operators-drds8" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.425319 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c944be15-8b3b-417f-9640-2c926704f541-catalog-content\") pod \"community-operators-drds8\" (UID: \"c944be15-8b3b-417f-9640-2c926704f541\") " pod="openshift-marketplace/community-operators-drds8" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.425449 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rszq4\" (UniqueName: \"kubernetes.io/projected/c944be15-8b3b-417f-9640-2c926704f541-kube-api-access-rszq4\") pod \"community-operators-drds8\" (UID: \"c944be15-8b3b-417f-9640-2c926704f541\") " pod="openshift-marketplace/community-operators-drds8" Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.425549 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:33.925535809 +0000 UTC m=+154.051565372 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.425840 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c944be15-8b3b-417f-9640-2c926704f541-catalog-content\") pod \"community-operators-drds8\" (UID: \"c944be15-8b3b-417f-9640-2c926704f541\") " pod="openshift-marketplace/community-operators-drds8" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.425897 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c944be15-8b3b-417f-9640-2c926704f541-utilities\") pod \"community-operators-drds8\" (UID: \"c944be15-8b3b-417f-9640-2c926704f541\") " pod="openshift-marketplace/community-operators-drds8" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.458519 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rszq4\" (UniqueName: \"kubernetes.io/projected/c944be15-8b3b-417f-9640-2c926704f541-kube-api-access-rszq4\") pod \"community-operators-drds8\" (UID: \"c944be15-8b3b-417f-9640-2c926704f541\") " pod="openshift-marketplace/community-operators-drds8" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.526542 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-drds8" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.529828 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.530050 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66bd838b-f358-4404-9c27-00bdffad355e-utilities\") pod \"certified-operators-sfx28\" (UID: \"66bd838b-f358-4404-9c27-00bdffad355e\") " pod="openshift-marketplace/certified-operators-sfx28" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.530100 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7f6bl\" (UniqueName: \"kubernetes.io/projected/66bd838b-f358-4404-9c27-00bdffad355e-kube-api-access-7f6bl\") pod \"certified-operators-sfx28\" (UID: \"66bd838b-f358-4404-9c27-00bdffad355e\") " pod="openshift-marketplace/certified-operators-sfx28" Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.530120 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66bd838b-f358-4404-9c27-00bdffad355e-catalog-content\") pod \"certified-operators-sfx28\" (UID: \"66bd838b-f358-4404-9c27-00bdffad355e\") " pod="openshift-marketplace/certified-operators-sfx28" Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.530215 4932 
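Two probe styles fail in this window: the marketplace-operator readiness probe cannot even connect (connection refused), while the router startup probe connects but receives a 500 whose body is the healthz check list ([-]backend-http, [-]has-synced, [+]process-running). A Go sketch of the status-code rule and output capture visible in these entries; probeHTTP is an illustrative stand-in, not the kubelet prober.

    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    )

    // probeHTTP treats 2xx/3xx as healthy; anything else fails with the
    // status code and the start of the response body as the probe output.
    func probeHTTP(url string) (healthy bool, output string, err error) {
    	resp, err := http.Get(url)
    	if err != nil {
    		// e.g. "dial tcp 10.217.0.41:8080: connect: connection refused"
    		return false, "", err
    	}
    	defer resp.Body.Close()
    	start, _ := io.ReadAll(io.LimitReader(resp.Body, 1024)) // keep start-of-body only
    	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
    		return true, "", nil
    	}
    	return false, fmt.Sprintf("HTTP probe failed with statuscode: %d, start-of-body: %s",
    		resp.StatusCode, start), nil
    }

    func main() {
    	// Address taken from the marketplace-operator readiness probe above;
    	// it is unreachable outside the cluster, so expect a dial error here.
    	ok, out, err := probeHTTP("http://10.217.0.41:8080/healthz")
    	fmt.Println(ok, out, err)
    }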
Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.530215 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:34.030161105 +0000 UTC m=+154.156190668 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.530378 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.530913 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:34.030901415 +0000 UTC m=+154.156930978 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.597946 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tjm2s"]
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.607651 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x52wc"]
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.608611 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.620477 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x52wc"]
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.631332 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.631420 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:34.131402087 +0000 UTC m=+154.257431650 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.631562 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7f6bl\" (UniqueName: \"kubernetes.io/projected/66bd838b-f358-4404-9c27-00bdffad355e-kube-api-access-7f6bl\") pod \"certified-operators-sfx28\" (UID: \"66bd838b-f358-4404-9c27-00bdffad355e\") " pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.631588 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66bd838b-f358-4404-9c27-00bdffad355e-catalog-content\") pod \"certified-operators-sfx28\" (UID: \"66bd838b-f358-4404-9c27-00bdffad355e\") " pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.631654 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.631675 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66bd838b-f358-4404-9c27-00bdffad355e-utilities\") pod \"certified-operators-sfx28\" (UID: \"66bd838b-f358-4404-9c27-00bdffad355e\") " pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.632007 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:34.131999601 +0000 UTC m=+154.258029164 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.651986 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7f6bl\" (UniqueName: \"kubernetes.io/projected/66bd838b-f358-4404-9c27-00bdffad355e-kube-api-access-7f6bl\") pod \"certified-operators-sfx28\" (UID: \"66bd838b-f358-4404-9c27-00bdffad355e\") " pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.706064 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66bd838b-f358-4404-9c27-00bdffad355e-utilities\") pod \"certified-operators-sfx28\" (UID: \"66bd838b-f358-4404-9c27-00bdffad355e\") " pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.706627 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66bd838b-f358-4404-9c27-00bdffad355e-catalog-content\") pod \"certified-operators-sfx28\" (UID: \"66bd838b-f358-4404-9c27-00bdffad355e\") " pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.733815 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.734264 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e63c0208-0bee-4882-b439-76766480e602-catalog-content\") pod \"community-operators-x52wc\" (UID: \"e63c0208-0bee-4882-b439-76766480e602\") " pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.734366 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e63c0208-0bee-4882-b439-76766480e602-utilities\") pod \"community-operators-x52wc\" (UID: \"e63c0208-0bee-4882-b439-76766480e602\") " pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.734417 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kn6vh\" (UniqueName: \"kubernetes.io/projected/e63c0208-0bee-4882-b439-76766480e602-kube-api-access-kn6vh\") pod \"community-operators-x52wc\" (UID: \"e63c0208-0bee-4882-b439-76766480e602\") " pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.734690 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.734693 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 08:51:34.234672309 +0000 UTC m=+154.360701882 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.764408 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-drds8"]
Nov 25 08:51:33 crc kubenswrapper[4932]: W1125 08:51:33.785552 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc944be15_8b3b_417f_9640_2c926704f541.slice/crio-c5fb013ce43b99e90d0d7247406197519d9d4393d3f400e54b840a0a52f13583 WatchSource:0}: Error finding container c5fb013ce43b99e90d0d7247406197519d9d4393d3f400e54b840a0a52f13583: Status 404 returned error can't find the container with id c5fb013ce43b99e90d0d7247406197519d9d4393d3f400e54b840a0a52f13583
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.835353 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.835629 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e63c0208-0bee-4882-b439-76766480e602-utilities\") pod \"community-operators-x52wc\" (UID: \"e63c0208-0bee-4882-b439-76766480e602\") " pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.835675 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kn6vh\" (UniqueName: \"kubernetes.io/projected/e63c0208-0bee-4882-b439-76766480e602-kube-api-access-kn6vh\") pod \"community-operators-x52wc\" (UID: \"e63c0208-0bee-4882-b439-76766480e602\") " pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.835724 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e63c0208-0bee-4882-b439-76766480e602-catalog-content\") pod \"community-operators-x52wc\" (UID: \"e63c0208-0bee-4882-b439-76766480e602\") " pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.836264 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e63c0208-0bee-4882-b439-76766480e602-catalog-content\") pod \"community-operators-x52wc\" (UID: \"e63c0208-0bee-4882-b439-76766480e602\") " pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:51:33 crc kubenswrapper[4932]: E1125 08:51:33.836289 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 08:51:34.336273425 +0000 UTC m=+154.462302988 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-mgqrs" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.836334 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e63c0208-0bee-4882-b439-76766480e602-utilities\") pod \"community-operators-x52wc\" (UID: \"e63c0208-0bee-4882-b439-76766480e602\") " pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.860424 4932 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-25T08:51:33.087634118Z","Handler":null,"Name":""}
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.865329 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kn6vh\" (UniqueName: \"kubernetes.io/projected/e63c0208-0bee-4882-b439-76766480e602-kube-api-access-kn6vh\") pod \"community-operators-x52wc\" (UID: \"e63c0208-0bee-4882-b439-76766480e602\") " pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.872874 4932 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.872904 4932 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.925150 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.925951 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.928856 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.929015 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.931227 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.931912 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.939668 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.944842 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 25 08:51:33 crc kubenswrapper[4932]: I1125 08:51:33.967436 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sfx28"]
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.041572 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.041785 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/468032a7-4de1-472c-b1da-d60cd0879891-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"468032a7-4de1-472c-b1da-d60cd0879891\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.041822 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/468032a7-4de1-472c-b1da-d60cd0879891-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"468032a7-4de1-472c-b1da-d60cd0879891\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.045032 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
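This is the turning point of the mount saga: the plugin-watcher socket added at 08:51:33.087 is processed, RegisterPlugin validates and registers kubevirt.io.hostpath-provisioner at /var/lib/kubelet/plugins/csi-hostpath/csi.sock, the stale unmount for pod 8f668bae finally succeeds, and the next MountDevice attempt skips the staging step because the driver does not advertise the STAGE_UNSTAGE_VOLUME node capability. A Go sketch of that capability gate, modeling the capability set as a plain map rather than the CSI gRPC types (an assumption made for brevity):

    package main

    import "fmt"

    // nodeCapabilities stands in for the result of a CSI NodeGetCapabilities
    // call; a real implementation would query the driver over gRPC.
    type nodeCapabilities map[string]bool

    // mountDevice performs the staging step (NodeStageVolume to a global
    // mount path) only when the driver advertises STAGE_UNSTAGE_VOLUME;
    // otherwise staging is skipped, as in the csi_attacher message above.
    func mountDevice(caps nodeCapabilities, volumeID, globalMountPath string) {
    	if !caps["STAGE_UNSTAGE_VOLUME"] {
    		fmt.Printf("STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice for %s\n", volumeID)
    		return
    	}
    	fmt.Printf("NodeStageVolume(%s) -> %s\n", volumeID, globalMountPath)
    }

    func main() {
    	// A hostpath-style driver that does not stage volumes.
    	mountDevice(nodeCapabilities{},
    		"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8",
    		"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/.../globalmount") // path shortened
    }

With staging skipped, MountVolume proceeds directly to SetUp, which is why MountDevice and SetUp succeed back-to-back in the entries that follow.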
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.045074 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.089969 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-mgqrs\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.124915 4932 patch_prober.go:28] interesting pod/router-default-5444994796-bmncc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 08:51:34 crc kubenswrapper[4932]: [-]has-synced failed: reason withheld
Nov 25 08:51:34 crc kubenswrapper[4932]: [+]process-running ok
Nov 25 08:51:34 crc kubenswrapper[4932]: healthz check failed
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.124964 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bmncc" podUID="61828b1b-84ea-4648-ad3c-ab4c3c592743" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.144576 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/468032a7-4de1-472c-b1da-d60cd0879891-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"468032a7-4de1-472c-b1da-d60cd0879891\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.144647 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/468032a7-4de1-472c-b1da-d60cd0879891-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"468032a7-4de1-472c-b1da-d60cd0879891\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.144829 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/468032a7-4de1-472c-b1da-d60cd0879891-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"468032a7-4de1-472c-b1da-d60cd0879891\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.167332 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/468032a7-4de1-472c-b1da-d60cd0879891-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"468032a7-4de1-472c-b1da-d60cd0879891\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.170612 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x52wc"]
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.255660 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.294067 4932 generic.go:334] "Generic (PLEG): container finished" podID="66bd838b-f358-4404-9c27-00bdffad355e" containerID="e3c5cafcb63ec5a271698a7785c1ab63dd2a9ebaa3bf1c2f49bf0f03a1d2c13f" exitCode=0
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.294123 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sfx28" event={"ID":"66bd838b-f358-4404-9c27-00bdffad355e","Type":"ContainerDied","Data":"e3c5cafcb63ec5a271698a7785c1ab63dd2a9ebaa3bf1c2f49bf0f03a1d2c13f"}
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.294147 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sfx28" event={"ID":"66bd838b-f358-4404-9c27-00bdffad355e","Type":"ContainerStarted","Data":"e24fe6a16c69ecf6bea925bddcbbee721e35aa8261c8048dae2a70aab496df72"}
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.296670 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.297629 4932 generic.go:334] "Generic (PLEG): container finished" podID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" containerID="14ba72eff3f817c7968890efaa19b3910341a7dedab2ef3902345fbcfbd0919f" exitCode=0
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.297701 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjm2s" event={"ID":"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd","Type":"ContainerDied","Data":"14ba72eff3f817c7968890efaa19b3910341a7dedab2ef3902345fbcfbd0919f"}
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.297732 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjm2s" event={"ID":"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd","Type":"ContainerStarted","Data":"5593a0fad723c29fa273ff97924199acb949b473ee59467a55d4682924e7941b"}
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.302992 4932 generic.go:334] "Generic (PLEG): container finished" podID="c944be15-8b3b-417f-9640-2c926704f541" containerID="889d2be148af28811a1e15f3301374ddd550b46329cf414c02fa81a707decb2b" exitCode=0
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.303356 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drds8" event={"ID":"c944be15-8b3b-417f-9640-2c926704f541","Type":"ContainerDied","Data":"889d2be148af28811a1e15f3301374ddd550b46329cf414c02fa81a707decb2b"}
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.303382 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drds8" event={"ID":"c944be15-8b3b-417f-9640-2c926704f541","Type":"ContainerStarted","Data":"c5fb013ce43b99e90d0d7247406197519d9d4393d3f400e54b840a0a52f13583"}
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.305773 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x52wc" event={"ID":"e63c0208-0bee-4882-b439-76766480e602","Type":"ContainerStarted","Data":"7f6c1eb0856efab1e313396cf89bbbb40c87929e784512854d0ff115aafb0c82"}
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.320383 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-c2l58"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.325371 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.572981 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-mgqrs"]
Nov 25 08:51:34 crc kubenswrapper[4932]: W1125 08:51:34.582047 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbba1e90_6d95_4837_b776_6a03a2e7901a.slice/crio-162ad2a51412fc1989531220753a3c260d76a66da6d8dc1c1791ac4758d24f47 WatchSource:0}: Error finding container 162ad2a51412fc1989531220753a3c260d76a66da6d8dc1c1791ac4758d24f47: Status 404 returned error can't find the container with id 162ad2a51412fc1989531220753a3c260d76a66da6d8dc1c1791ac4758d24f47
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.592086 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 25 08:51:34 crc kubenswrapper[4932]: W1125 08:51:34.608466 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod468032a7_4de1_472c_b1da_d60cd0879891.slice/crio-701febf459cf6306a93acfd6dcb4abdb0c27621880cdf6483b8c8fb5d2cdac38 WatchSource:0}: Error finding container 701febf459cf6306a93acfd6dcb4abdb0c27621880cdf6483b8c8fb5d2cdac38: Status 404 returned error can't find the container with id 701febf459cf6306a93acfd6dcb4abdb0c27621880cdf6483b8c8fb5d2cdac38
Nov 25 08:51:34 crc kubenswrapper[4932]: I1125 08:51:34.614720 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.008547 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5xtp9"]
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.009756 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.012802 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.017373 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xtp9"]
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.056075 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hx5l6\" (UniqueName: \"kubernetes.io/projected/df33346c-a298-4a78-b566-70b2a11eb307-kube-api-access-hx5l6\") pod \"redhat-marketplace-5xtp9\" (UID: \"df33346c-a298-4a78-b566-70b2a11eb307\") " pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.056153 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df33346c-a298-4a78-b566-70b2a11eb307-utilities\") pod \"redhat-marketplace-5xtp9\" (UID: \"df33346c-a298-4a78-b566-70b2a11eb307\") " pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.056237 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df33346c-a298-4a78-b566-70b2a11eb307-catalog-content\") pod \"redhat-marketplace-5xtp9\" (UID: \"df33346c-a298-4a78-b566-70b2a11eb307\") " pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.123393 4932 patch_prober.go:28] interesting pod/router-default-5444994796-bmncc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 08:51:35 crc kubenswrapper[4932]: [-]has-synced failed: reason withheld
Nov 25 08:51:35 crc kubenswrapper[4932]: [+]process-running ok
Nov 25 08:51:35 crc kubenswrapper[4932]: healthz check failed
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.123497 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bmncc" podUID="61828b1b-84ea-4648-ad3c-ab4c3c592743" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.157243 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hx5l6\" (UniqueName: \"kubernetes.io/projected/df33346c-a298-4a78-b566-70b2a11eb307-kube-api-access-hx5l6\") pod \"redhat-marketplace-5xtp9\" (UID: \"df33346c-a298-4a78-b566-70b2a11eb307\") " pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.157587 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df33346c-a298-4a78-b566-70b2a11eb307-utilities\") pod \"redhat-marketplace-5xtp9\" (UID: \"df33346c-a298-4a78-b566-70b2a11eb307\") " pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.157836 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df33346c-a298-4a78-b566-70b2a11eb307-catalog-content\") pod \"redhat-marketplace-5xtp9\" (UID: \"df33346c-a298-4a78-b566-70b2a11eb307\") " pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.158337 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df33346c-a298-4a78-b566-70b2a11eb307-catalog-content\") pod \"redhat-marketplace-5xtp9\" (UID: \"df33346c-a298-4a78-b566-70b2a11eb307\") " pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.158373 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df33346c-a298-4a78-b566-70b2a11eb307-utilities\") pod \"redhat-marketplace-5xtp9\" (UID: \"df33346c-a298-4a78-b566-70b2a11eb307\") " pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.190453 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hx5l6\" (UniqueName: \"kubernetes.io/projected/df33346c-a298-4a78-b566-70b2a11eb307-kube-api-access-hx5l6\") pod \"redhat-marketplace-5xtp9\" (UID: \"df33346c-a298-4a78-b566-70b2a11eb307\") " pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.314088 4932 generic.go:334] "Generic (PLEG): container finished" podID="e63c0208-0bee-4882-b439-76766480e602" containerID="974052f58c1f397df2ed42ed095fd7105639bb73ff21f92898cc746edc67e99a" exitCode=0
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.314165 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x52wc" event={"ID":"e63c0208-0bee-4882-b439-76766480e602","Type":"ContainerDied","Data":"974052f58c1f397df2ed42ed095fd7105639bb73ff21f92898cc746edc67e99a"}
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.317278 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"468032a7-4de1-472c-b1da-d60cd0879891","Type":"ContainerStarted","Data":"e35cb34a9d1123bf0f4ceb3451928b0dbb36a154b3bcf57b565d376393d959a0"}
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.317324 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-zffp5"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.317343 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"468032a7-4de1-472c-b1da-d60cd0879891","Type":"ContainerStarted","Data":"701febf459cf6306a93acfd6dcb4abdb0c27621880cdf6483b8c8fb5d2cdac38"}
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.317439 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-zffp5"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.325634 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" event={"ID":"dbba1e90-6d95-4837-b776-6a03a2e7901a","Type":"ContainerStarted","Data":"593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c"}
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.325690 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" event={"ID":"dbba1e90-6d95-4837-b776-6a03a2e7901a","Type":"ContainerStarted","Data":"162ad2a51412fc1989531220753a3c260d76a66da6d8dc1c1791ac4758d24f47"}
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.328935 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-zffp5"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.337754 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.356025 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.356003497 podStartE2EDuration="2.356003497s" podCreationTimestamp="2025-11-25 08:51:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:35.352557859 +0000 UTC m=+155.478587422" watchObservedRunningTime="2025-11-25 08:51:35.356003497 +0000 UTC m=+155.482033070"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.379962 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" podStartSLOduration=133.379936455 podStartE2EDuration="2m13.379936455s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:51:35.373388733 +0000 UTC m=+155.499418316" watchObservedRunningTime="2025-11-25 08:51:35.379936455 +0000 UTC m=+155.505966018"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.421691 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z826r"]
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.422942 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.426367 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z826r"]
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.563352 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6955d562-1cac-40e0-9a6f-50aa7151560e-catalog-content\") pod \"redhat-marketplace-z826r\" (UID: \"6955d562-1cac-40e0-9a6f-50aa7151560e\") " pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.563402 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7zls\" (UniqueName: \"kubernetes.io/projected/6955d562-1cac-40e0-9a6f-50aa7151560e-kube-api-access-v7zls\") pod \"redhat-marketplace-z826r\" (UID: \"6955d562-1cac-40e0-9a6f-50aa7151560e\") " pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.563501 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6955d562-1cac-40e0-9a6f-50aa7151560e-utilities\") pod \"redhat-marketplace-z826r\" (UID: \"6955d562-1cac-40e0-9a6f-50aa7151560e\") " pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.628291 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xtp9"]
Nov 25 08:51:35 crc kubenswrapper[4932]: W1125 08:51:35.661680 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf33346c_a298_4a78_b566_70b2a11eb307.slice/crio-8841cf54801260f6337acfcaa438460336160a19066a98c56bd10ceec27e21b0 WatchSource:0}: Error finding container 8841cf54801260f6337acfcaa438460336160a19066a98c56bd10ceec27e21b0: Status 404 returned error can't find the container with id 8841cf54801260f6337acfcaa438460336160a19066a98c56bd10ceec27e21b0
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.666510 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6955d562-1cac-40e0-9a6f-50aa7151560e-utilities\") pod \"redhat-marketplace-z826r\" (UID: \"6955d562-1cac-40e0-9a6f-50aa7151560e\") " pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.666574 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6955d562-1cac-40e0-9a6f-50aa7151560e-catalog-content\") pod \"redhat-marketplace-z826r\" (UID: \"6955d562-1cac-40e0-9a6f-50aa7151560e\") " pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.666609 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7zls\" (UniqueName: \"kubernetes.io/projected/6955d562-1cac-40e0-9a6f-50aa7151560e-kube-api-access-v7zls\") pod \"redhat-marketplace-z826r\" (UID: \"6955d562-1cac-40e0-9a6f-50aa7151560e\") " pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.667029 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6955d562-1cac-40e0-9a6f-50aa7151560e-utilities\") pod \"redhat-marketplace-z826r\" (UID: \"6955d562-1cac-40e0-9a6f-50aa7151560e\") " pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.667273 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6955d562-1cac-40e0-9a6f-50aa7151560e-catalog-content\") pod \"redhat-marketplace-z826r\" (UID: \"6955d562-1cac-40e0-9a6f-50aa7151560e\") " pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.691605 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7zls\" (UniqueName: \"kubernetes.io/projected/6955d562-1cac-40e0-9a6f-50aa7151560e-kube-api-access-v7zls\") pod \"redhat-marketplace-z826r\" (UID: \"6955d562-1cac-40e0-9a6f-50aa7151560e\") " pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.787649 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:51:35 crc kubenswrapper[4932]: I1125 08:51:35.976919 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z826r"]
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.121834 4932 patch_prober.go:28] interesting pod/router-default-5444994796-bmncc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 08:51:36 crc kubenswrapper[4932]: [-]has-synced failed: reason withheld
Nov 25 08:51:36 crc kubenswrapper[4932]: [+]process-running ok
Nov 25 08:51:36 crc kubenswrapper[4932]: healthz check failed
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.121887 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bmncc" podUID="61828b1b-84ea-4648-ad3c-ab4c3c592743" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.213915 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6h2tt"]
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.215281 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6h2tt"
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.217169 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.220885 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6h2tt"]
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.340388 4932 generic.go:334] "Generic (PLEG): container finished" podID="df33346c-a298-4a78-b566-70b2a11eb307" containerID="4fa7771dc0a83053f45aa0de64c44b226d77f5ddc4a00078517ab63749e772d8" exitCode=0
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.340448 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xtp9" event={"ID":"df33346c-a298-4a78-b566-70b2a11eb307","Type":"ContainerDied","Data":"4fa7771dc0a83053f45aa0de64c44b226d77f5ddc4a00078517ab63749e772d8"}
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.340472 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xtp9" event={"ID":"df33346c-a298-4a78-b566-70b2a11eb307","Type":"ContainerStarted","Data":"8841cf54801260f6337acfcaa438460336160a19066a98c56bd10ceec27e21b0"}
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.343512 4932 generic.go:334] "Generic (PLEG): container finished" podID="468032a7-4de1-472c-b1da-d60cd0879891" containerID="e35cb34a9d1123bf0f4ceb3451928b0dbb36a154b3bcf57b565d376393d959a0" exitCode=0
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.343767 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"468032a7-4de1-472c-b1da-d60cd0879891","Type":"ContainerDied","Data":"e35cb34a9d1123bf0f4ceb3451928b0dbb36a154b3bcf57b565d376393d959a0"}
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.352101 4932 generic.go:334] "Generic (PLEG): container finished" podID="6955d562-1cac-40e0-9a6f-50aa7151560e" containerID="88bb3701e0e18360efd7b6a9c4d69d99e88976a8dc8a367a485045cf0e6cbb10" exitCode=0
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.352308 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z826r" event={"ID":"6955d562-1cac-40e0-9a6f-50aa7151560e","Type":"ContainerDied","Data":"88bb3701e0e18360efd7b6a9c4d69d99e88976a8dc8a367a485045cf0e6cbb10"}
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.352336 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z826r" event={"ID":"6955d562-1cac-40e0-9a6f-50aa7151560e","Type":"ContainerStarted","Data":"c6e8d18606579c1345dd29556cc70a8399da089ff536853220d022470b514a98"}
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.352656 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs"
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.357872 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-zffp5"
Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.375014 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cec571af-8e80-4ddb-8218-deab898f34cd-utilities\") pod \"redhat-operators-6h2tt\" (UID: \"cec571af-8e80-4ddb-8218-deab898f34cd\") " 
pod="openshift-marketplace/redhat-operators-6h2tt" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.375106 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cec571af-8e80-4ddb-8218-deab898f34cd-catalog-content\") pod \"redhat-operators-6h2tt\" (UID: \"cec571af-8e80-4ddb-8218-deab898f34cd\") " pod="openshift-marketplace/redhat-operators-6h2tt" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.375132 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mncxn\" (UniqueName: \"kubernetes.io/projected/cec571af-8e80-4ddb-8218-deab898f34cd-kube-api-access-mncxn\") pod \"redhat-operators-6h2tt\" (UID: \"cec571af-8e80-4ddb-8218-deab898f34cd\") " pod="openshift-marketplace/redhat-operators-6h2tt" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.406104 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.406164 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.407326 4932 patch_prober.go:28] interesting pod/console-f9d7485db-5kf8q container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.407424 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-5kf8q" podUID="a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.484533 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cec571af-8e80-4ddb-8218-deab898f34cd-utilities\") pod \"redhat-operators-6h2tt\" (UID: \"cec571af-8e80-4ddb-8218-deab898f34cd\") " pod="openshift-marketplace/redhat-operators-6h2tt" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.484622 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cec571af-8e80-4ddb-8218-deab898f34cd-catalog-content\") pod \"redhat-operators-6h2tt\" (UID: \"cec571af-8e80-4ddb-8218-deab898f34cd\") " pod="openshift-marketplace/redhat-operators-6h2tt" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.484641 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mncxn\" (UniqueName: \"kubernetes.io/projected/cec571af-8e80-4ddb-8218-deab898f34cd-kube-api-access-mncxn\") pod \"redhat-operators-6h2tt\" (UID: \"cec571af-8e80-4ddb-8218-deab898f34cd\") " pod="openshift-marketplace/redhat-operators-6h2tt" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.487341 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cec571af-8e80-4ddb-8218-deab898f34cd-utilities\") pod \"redhat-operators-6h2tt\" (UID: \"cec571af-8e80-4ddb-8218-deab898f34cd\") " pod="openshift-marketplace/redhat-operators-6h2tt" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.488282 4932 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cec571af-8e80-4ddb-8218-deab898f34cd-catalog-content\") pod \"redhat-operators-6h2tt\" (UID: \"cec571af-8e80-4ddb-8218-deab898f34cd\") " pod="openshift-marketplace/redhat-operators-6h2tt" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.522472 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mncxn\" (UniqueName: \"kubernetes.io/projected/cec571af-8e80-4ddb-8218-deab898f34cd-kube-api-access-mncxn\") pod \"redhat-operators-6h2tt\" (UID: \"cec571af-8e80-4ddb-8218-deab898f34cd\") " pod="openshift-marketplace/redhat-operators-6h2tt" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.537773 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6h2tt" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.591454 4932 patch_prober.go:28] interesting pod/downloads-7954f5f757-qtltp container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" start-of-body= Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.591500 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-qtltp" podUID="1688eab6-98cb-4e8e-97c5-f14a2fa0db76" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.591906 4932 patch_prober.go:28] interesting pod/downloads-7954f5f757-qtltp container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" start-of-body= Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.591930 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-qtltp" podUID="1688eab6-98cb-4e8e-97c5-f14a2fa0db76" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.9:8080/\": dial tcp 10.217.0.9:8080: connect: connection refused" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.608489 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-j99b6"] Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.609918 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-j99b6" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.626469 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j99b6"] Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.690697 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.691096 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.700978 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.785851 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6h2tt"] Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.787762 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6574608b-c907-4cd1-84b8-6778c893ede3-catalog-content\") pod \"redhat-operators-j99b6\" (UID: \"6574608b-c907-4cd1-84b8-6778c893ede3\") " pod="openshift-marketplace/redhat-operators-j99b6" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.787805 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6574608b-c907-4cd1-84b8-6778c893ede3-utilities\") pod \"redhat-operators-j99b6\" (UID: \"6574608b-c907-4cd1-84b8-6778c893ede3\") " pod="openshift-marketplace/redhat-operators-j99b6" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.787894 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpdfv\" (UniqueName: \"kubernetes.io/projected/6574608b-c907-4cd1-84b8-6778c893ede3-kube-api-access-qpdfv\") pod \"redhat-operators-j99b6\" (UID: \"6574608b-c907-4cd1-84b8-6778c893ede3\") " pod="openshift-marketplace/redhat-operators-j99b6" Nov 25 08:51:36 crc kubenswrapper[4932]: W1125 08:51:36.851143 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcec571af_8e80_4ddb_8218_deab898f34cd.slice/crio-9eea03411956af59407bf9215d231de939a25b415eb4088d2a46e8514098b63d WatchSource:0}: Error finding container 9eea03411956af59407bf9215d231de939a25b415eb4088d2a46e8514098b63d: Status 404 returned error can't find the container with id 9eea03411956af59407bf9215d231de939a25b415eb4088d2a46e8514098b63d Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.889492 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpdfv\" (UniqueName: \"kubernetes.io/projected/6574608b-c907-4cd1-84b8-6778c893ede3-kube-api-access-qpdfv\") pod \"redhat-operators-j99b6\" (UID: \"6574608b-c907-4cd1-84b8-6778c893ede3\") " pod="openshift-marketplace/redhat-operators-j99b6" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.889628 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6574608b-c907-4cd1-84b8-6778c893ede3-catalog-content\") pod \"redhat-operators-j99b6\" (UID: \"6574608b-c907-4cd1-84b8-6778c893ede3\") " 
pod="openshift-marketplace/redhat-operators-j99b6" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.889662 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6574608b-c907-4cd1-84b8-6778c893ede3-utilities\") pod \"redhat-operators-j99b6\" (UID: \"6574608b-c907-4cd1-84b8-6778c893ede3\") " pod="openshift-marketplace/redhat-operators-j99b6" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.890111 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6574608b-c907-4cd1-84b8-6778c893ede3-utilities\") pod \"redhat-operators-j99b6\" (UID: \"6574608b-c907-4cd1-84b8-6778c893ede3\") " pod="openshift-marketplace/redhat-operators-j99b6" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.890328 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6574608b-c907-4cd1-84b8-6778c893ede3-catalog-content\") pod \"redhat-operators-j99b6\" (UID: \"6574608b-c907-4cd1-84b8-6778c893ede3\") " pod="openshift-marketplace/redhat-operators-j99b6" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.911394 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpdfv\" (UniqueName: \"kubernetes.io/projected/6574608b-c907-4cd1-84b8-6778c893ede3-kube-api-access-qpdfv\") pod \"redhat-operators-j99b6\" (UID: \"6574608b-c907-4cd1-84b8-6778c893ede3\") " pod="openshift-marketplace/redhat-operators-j99b6" Nov 25 08:51:36 crc kubenswrapper[4932]: I1125 08:51:36.929778 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j99b6" Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.119799 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.124247 4932 patch_prober.go:28] interesting pod/router-default-5444994796-bmncc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 08:51:37 crc kubenswrapper[4932]: [-]has-synced failed: reason withheld Nov 25 08:51:37 crc kubenswrapper[4932]: [+]process-running ok Nov 25 08:51:37 crc kubenswrapper[4932]: healthz check failed Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.124491 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bmncc" podUID="61828b1b-84ea-4648-ad3c-ab4c3c592743" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.185664 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.185712 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 08:51:37 crc 
kubenswrapper[4932]: I1125 08:51:37.223859 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.314486 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j99b6"] Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.370710 4932 generic.go:334] "Generic (PLEG): container finished" podID="cec571af-8e80-4ddb-8218-deab898f34cd" containerID="18413151967a1c9c3ba8ec6f80b50cd938602707e79267a9b2a6be081a2a83c2" exitCode=0 Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.370846 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6h2tt" event={"ID":"cec571af-8e80-4ddb-8218-deab898f34cd","Type":"ContainerDied","Data":"18413151967a1c9c3ba8ec6f80b50cd938602707e79267a9b2a6be081a2a83c2"} Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.370948 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6h2tt" event={"ID":"cec571af-8e80-4ddb-8218-deab898f34cd","Type":"ContainerStarted","Data":"9eea03411956af59407bf9215d231de939a25b415eb4088d2a46e8514098b63d"} Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.380171 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4g2lq" Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.810923 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.922186 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/468032a7-4de1-472c-b1da-d60cd0879891-kube-api-access\") pod \"468032a7-4de1-472c-b1da-d60cd0879891\" (UID: \"468032a7-4de1-472c-b1da-d60cd0879891\") " Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.922486 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/468032a7-4de1-472c-b1da-d60cd0879891-kubelet-dir\") pod \"468032a7-4de1-472c-b1da-d60cd0879891\" (UID: \"468032a7-4de1-472c-b1da-d60cd0879891\") " Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.922621 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/468032a7-4de1-472c-b1da-d60cd0879891-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "468032a7-4de1-472c-b1da-d60cd0879891" (UID: "468032a7-4de1-472c-b1da-d60cd0879891"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.922712 4932 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/468032a7-4de1-472c-b1da-d60cd0879891-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 08:51:37 crc kubenswrapper[4932]: I1125 08:51:37.928299 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/468032a7-4de1-472c-b1da-d60cd0879891-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "468032a7-4de1-472c-b1da-d60cd0879891" (UID: "468032a7-4de1-472c-b1da-d60cd0879891"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.024386 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/468032a7-4de1-472c-b1da-d60cd0879891-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.123037 4932 patch_prober.go:28] interesting pod/router-default-5444994796-bmncc container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 08:51:38 crc kubenswrapper[4932]: [-]has-synced failed: reason withheld Nov 25 08:51:38 crc kubenswrapper[4932]: [+]process-running ok Nov 25 08:51:38 crc kubenswrapper[4932]: healthz check failed Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.123089 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bmncc" podUID="61828b1b-84ea-4648-ad3c-ab4c3c592743" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.385530 4932 generic.go:334] "Generic (PLEG): container finished" podID="6574608b-c907-4cd1-84b8-6778c893ede3" containerID="45610aace31f173ee51c49c9e284813f7e44e2c3f6532f8e67ce9e7fdd4b10c9" exitCode=0 Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.385587 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j99b6" event={"ID":"6574608b-c907-4cd1-84b8-6778c893ede3","Type":"ContainerDied","Data":"45610aace31f173ee51c49c9e284813f7e44e2c3f6532f8e67ce9e7fdd4b10c9"} Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.385613 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j99b6" event={"ID":"6574608b-c907-4cd1-84b8-6778c893ede3","Type":"ContainerStarted","Data":"ab8aa5fca51b73f0fb5422029bb53c4f9b3eab211f2536ba671aa60761f99506"} Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.391575 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"468032a7-4de1-472c-b1da-d60cd0879891","Type":"ContainerDied","Data":"701febf459cf6306a93acfd6dcb4abdb0c27621880cdf6483b8c8fb5d2cdac38"} Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.391883 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="701febf459cf6306a93acfd6dcb4abdb0c27621880cdf6483b8c8fb5d2cdac38" Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.391922 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.853628 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 08:51:38 crc kubenswrapper[4932]: E1125 08:51:38.853913 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="468032a7-4de1-472c-b1da-d60cd0879891" containerName="pruner" Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.853947 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="468032a7-4de1-472c-b1da-d60cd0879891" containerName="pruner" Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.854053 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="468032a7-4de1-472c-b1da-d60cd0879891" containerName="pruner" Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.855288 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.859550 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.863876 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.864100 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.944229 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/06de2009-cc4c-44d7-a855-eaf54f43b3a8-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"06de2009-cc4c-44d7-a855-eaf54f43b3a8\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 08:51:38 crc kubenswrapper[4932]: I1125 08:51:38.944358 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/06de2009-cc4c-44d7-a855-eaf54f43b3a8-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"06de2009-cc4c-44d7-a855-eaf54f43b3a8\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 08:51:39 crc kubenswrapper[4932]: I1125 08:51:39.046136 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/06de2009-cc4c-44d7-a855-eaf54f43b3a8-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"06de2009-cc4c-44d7-a855-eaf54f43b3a8\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 08:51:39 crc kubenswrapper[4932]: I1125 08:51:39.046259 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/06de2009-cc4c-44d7-a855-eaf54f43b3a8-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"06de2009-cc4c-44d7-a855-eaf54f43b3a8\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 08:51:39 crc kubenswrapper[4932]: I1125 08:51:39.046393 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/06de2009-cc4c-44d7-a855-eaf54f43b3a8-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"06de2009-cc4c-44d7-a855-eaf54f43b3a8\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 08:51:39 crc 
kubenswrapper[4932]: I1125 08:51:39.074459 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/06de2009-cc4c-44d7-a855-eaf54f43b3a8-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"06de2009-cc4c-44d7-a855-eaf54f43b3a8\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 08:51:39 crc kubenswrapper[4932]: I1125 08:51:39.133874 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:39 crc kubenswrapper[4932]: I1125 08:51:39.137207 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-bmncc" Nov 25 08:51:39 crc kubenswrapper[4932]: I1125 08:51:39.216238 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 08:51:42 crc kubenswrapper[4932]: I1125 08:51:42.271150 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-sdbr8" Nov 25 08:51:44 crc kubenswrapper[4932]: I1125 08:51:44.421651 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:44 crc kubenswrapper[4932]: I1125 08:51:44.441686 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/58f40128-d3fc-4588-ad8f-8cf129079911-metrics-certs\") pod \"network-metrics-daemon-fvbqs\" (UID: \"58f40128-d3fc-4588-ad8f-8cf129079911\") " pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:44 crc kubenswrapper[4932]: I1125 08:51:44.527968 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-fvbqs" Nov 25 08:51:46 crc kubenswrapper[4932]: I1125 08:51:46.434608 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:46 crc kubenswrapper[4932]: I1125 08:51:46.441344 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 08:51:46 crc kubenswrapper[4932]: I1125 08:51:46.596567 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-qtltp" Nov 25 08:51:54 crc kubenswrapper[4932]: I1125 08:51:54.334554 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:52:01 crc kubenswrapper[4932]: E1125 08:52:01.724833 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 25 08:52:01 crc kubenswrapper[4932]: E1125 08:52:01.725567 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7f6bl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-sfx28_openshift-marketplace(66bd838b-f358-4404-9c27-00bdffad355e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 08:52:01 crc kubenswrapper[4932]: E1125 08:52:01.726670 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-sfx28" podUID="66bd838b-f358-4404-9c27-00bdffad355e" Nov 25 08:52:04 crc kubenswrapper[4932]: E1125 08:52:04.087283 4932 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-sfx28" podUID="66bd838b-f358-4404-9c27-00bdffad355e" Nov 25 08:52:04 crc kubenswrapper[4932]: E1125 08:52:04.181305 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 25 08:52:04 crc kubenswrapper[4932]: E1125 08:52:04.181487 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4b4mx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-tjm2s_openshift-marketplace(8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 08:52:04 crc kubenswrapper[4932]: E1125 08:52:04.182958 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-tjm2s" podUID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" Nov 25 08:52:04 crc kubenswrapper[4932]: I1125 08:52:04.488398 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-fvbqs"] Nov 25 08:52:07 crc kubenswrapper[4932]: I1125 08:52:07.180688 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 08:52:07 crc kubenswrapper[4932]: I1125 08:52:07.180768 4932 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 08:52:07 crc kubenswrapper[4932]: I1125 08:52:07.199801 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-24l97" Nov 25 08:52:07 crc kubenswrapper[4932]: E1125 08:52:07.210367 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-tjm2s" podUID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" Nov 25 08:52:07 crc kubenswrapper[4932]: W1125 08:52:07.417695 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod58f40128_d3fc_4588_ad8f_8cf129079911.slice/crio-17effa3c4978cebacec74aa802794652f3c9a7abe817cdb25488913e7aeb0541 WatchSource:0}: Error finding container 17effa3c4978cebacec74aa802794652f3c9a7abe817cdb25488913e7aeb0541: Status 404 returned error can't find the container with id 17effa3c4978cebacec74aa802794652f3c9a7abe817cdb25488913e7aeb0541 Nov 25 08:52:07 crc kubenswrapper[4932]: E1125 08:52:07.533771 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 25 08:52:07 crc kubenswrapper[4932]: E1125 08:52:07.534182 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mncxn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-6h2tt_openshift-marketplace(cec571af-8e80-4ddb-8218-deab898f34cd): ErrImagePull: rpc error: code = Canceled desc 
= copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 08:52:07 crc kubenswrapper[4932]: E1125 08:52:07.535444 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-6h2tt" podUID="cec571af-8e80-4ddb-8218-deab898f34cd" Nov 25 08:52:07 crc kubenswrapper[4932]: I1125 08:52:07.574902 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" event={"ID":"58f40128-d3fc-4588-ad8f-8cf129079911","Type":"ContainerStarted","Data":"17effa3c4978cebacec74aa802794652f3c9a7abe817cdb25488913e7aeb0541"} Nov 25 08:52:07 crc kubenswrapper[4932]: I1125 08:52:07.626060 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 08:52:07 crc kubenswrapper[4932]: E1125 08:52:07.637920 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-6h2tt" podUID="cec571af-8e80-4ddb-8218-deab898f34cd" Nov 25 08:52:07 crc kubenswrapper[4932]: W1125 08:52:07.639695 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod06de2009_cc4c_44d7_a855_eaf54f43b3a8.slice/crio-c9168ff861693a1035c699dca340513f46d78bc2ad38e85d521770eb766d027d WatchSource:0}: Error finding container c9168ff861693a1035c699dca340513f46d78bc2ad38e85d521770eb766d027d: Status 404 returned error can't find the container with id c9168ff861693a1035c699dca340513f46d78bc2ad38e85d521770eb766d027d Nov 25 08:52:07 crc kubenswrapper[4932]: E1125 08:52:07.719586 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 25 08:52:07 crc kubenswrapper[4932]: E1125 08:52:07.719737 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rszq4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-drds8_openshift-marketplace(c944be15-8b3b-417f-9640-2c926704f541): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 08:52:07 crc kubenswrapper[4932]: E1125 08:52:07.721163 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-drds8" podUID="c944be15-8b3b-417f-9640-2c926704f541" Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.583017 4932 generic.go:334] "Generic (PLEG): container finished" podID="6574608b-c907-4cd1-84b8-6778c893ede3" containerID="07fc03d6fb885fc99dbc5dd2b95256b1cfc1c8e8aefd07424e74757bffcde87e" exitCode=0 Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.583103 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j99b6" event={"ID":"6574608b-c907-4cd1-84b8-6778c893ede3","Type":"ContainerDied","Data":"07fc03d6fb885fc99dbc5dd2b95256b1cfc1c8e8aefd07424e74757bffcde87e"} Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.585643 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" event={"ID":"58f40128-d3fc-4588-ad8f-8cf129079911","Type":"ContainerStarted","Data":"f93fb5724d1ab74ae318ec40da5e431858011ebd9b1334e5c0cb39832ceab3d4"} Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.585669 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-fvbqs" event={"ID":"58f40128-d3fc-4588-ad8f-8cf129079911","Type":"ContainerStarted","Data":"67c310aeeef90494068ffe5225736ad8a87fc89f9473bea921c413cfc176d100"} Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.587397 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"06de2009-cc4c-44d7-a855-eaf54f43b3a8","Type":"ContainerStarted","Data":"5eb77e0632d179e401d00e3af03ae813d859995603437d3d64d1b9d75afbdef0"} Nov 25 08:52:08 crc kubenswrapper[4932]: 
I1125 08:52:08.587523 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"06de2009-cc4c-44d7-a855-eaf54f43b3a8","Type":"ContainerStarted","Data":"c9168ff861693a1035c699dca340513f46d78bc2ad38e85d521770eb766d027d"} Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.590696 4932 generic.go:334] "Generic (PLEG): container finished" podID="df33346c-a298-4a78-b566-70b2a11eb307" containerID="0e70554b4e33e91772ff1d1af9dea6a78541c9cf1f3cc6fabaadb2a7745ecc9a" exitCode=0 Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.590754 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xtp9" event={"ID":"df33346c-a298-4a78-b566-70b2a11eb307","Type":"ContainerDied","Data":"0e70554b4e33e91772ff1d1af9dea6a78541c9cf1f3cc6fabaadb2a7745ecc9a"} Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.595033 4932 generic.go:334] "Generic (PLEG): container finished" podID="e63c0208-0bee-4882-b439-76766480e602" containerID="963532c9ce163c2b40fae2099fe5262813314ffc5ed99b82e8729a183b92d72e" exitCode=0 Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.595111 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x52wc" event={"ID":"e63c0208-0bee-4882-b439-76766480e602","Type":"ContainerDied","Data":"963532c9ce163c2b40fae2099fe5262813314ffc5ed99b82e8729a183b92d72e"} Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.599695 4932 generic.go:334] "Generic (PLEG): container finished" podID="6955d562-1cac-40e0-9a6f-50aa7151560e" containerID="d5b724cb0743b8b01d0115dcb27b54f20134a21548283b4cc4a7d5ab7de3dd45" exitCode=0 Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.601032 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z826r" event={"ID":"6955d562-1cac-40e0-9a6f-50aa7151560e","Type":"ContainerDied","Data":"d5b724cb0743b8b01d0115dcb27b54f20134a21548283b4cc4a7d5ab7de3dd45"} Nov 25 08:52:08 crc kubenswrapper[4932]: E1125 08:52:08.617628 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-drds8" podUID="c944be15-8b3b-417f-9640-2c926704f541" Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.667810 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=30.667790238 podStartE2EDuration="30.667790238s" podCreationTimestamp="2025-11-25 08:51:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:52:08.663947311 +0000 UTC m=+188.789976884" watchObservedRunningTime="2025-11-25 08:52:08.667790238 +0000 UTC m=+188.793819801" Nov 25 08:52:08 crc kubenswrapper[4932]: I1125 08:52:08.695106 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-fvbqs" podStartSLOduration=166.695078819 podStartE2EDuration="2m46.695078819s" podCreationTimestamp="2025-11-25 08:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:52:08.693132485 +0000 UTC m=+188.819162058" watchObservedRunningTime="2025-11-25 08:52:08.695078819 +0000 UTC 
m=+188.821108402"
Nov 25 08:52:09 crc kubenswrapper[4932]: I1125 08:52:09.610142 4932 generic.go:334] "Generic (PLEG): container finished" podID="06de2009-cc4c-44d7-a855-eaf54f43b3a8" containerID="5eb77e0632d179e401d00e3af03ae813d859995603437d3d64d1b9d75afbdef0" exitCode=0
Nov 25 08:52:09 crc kubenswrapper[4932]: I1125 08:52:09.610783 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"06de2009-cc4c-44d7-a855-eaf54f43b3a8","Type":"ContainerDied","Data":"5eb77e0632d179e401d00e3af03ae813d859995603437d3d64d1b9d75afbdef0"}
Nov 25 08:52:09 crc kubenswrapper[4932]: I1125 08:52:09.616277 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xtp9" event={"ID":"df33346c-a298-4a78-b566-70b2a11eb307","Type":"ContainerStarted","Data":"cdc5feabd39c2462aafceb81ecc86a2e669b7f0102a03c45887bfb8d794feee3"}
Nov 25 08:52:09 crc kubenswrapper[4932]: I1125 08:52:09.618536 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x52wc" event={"ID":"e63c0208-0bee-4882-b439-76766480e602","Type":"ContainerStarted","Data":"d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034"}
Nov 25 08:52:09 crc kubenswrapper[4932]: I1125 08:52:09.621601 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z826r" event={"ID":"6955d562-1cac-40e0-9a6f-50aa7151560e","Type":"ContainerStarted","Data":"c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6"}
Nov 25 08:52:09 crc kubenswrapper[4932]: I1125 08:52:09.635351 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j99b6" event={"ID":"6574608b-c907-4cd1-84b8-6778c893ede3","Type":"ContainerStarted","Data":"552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a"}
Nov 25 08:52:09 crc kubenswrapper[4932]: I1125 08:52:09.653580 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x52wc" podStartSLOduration=2.95578487 podStartE2EDuration="36.653564564s" podCreationTimestamp="2025-11-25 08:51:33 +0000 UTC" firstStartedPulling="2025-11-25 08:51:35.315504667 +0000 UTC m=+155.441534230" lastFinishedPulling="2025-11-25 08:52:09.013284361 +0000 UTC m=+189.139313924" observedRunningTime="2025-11-25 08:52:09.650508373 +0000 UTC m=+189.776537956" watchObservedRunningTime="2025-11-25 08:52:09.653564564 +0000 UTC m=+189.779594117"
Nov 25 08:52:09 crc kubenswrapper[4932]: I1125 08:52:09.672447 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5xtp9" podStartSLOduration=2.9088345909999997 podStartE2EDuration="35.672428816s" podCreationTimestamp="2025-11-25 08:51:34 +0000 UTC" firstStartedPulling="2025-11-25 08:51:36.34221108 +0000 UTC m=+156.468240643" lastFinishedPulling="2025-11-25 08:52:09.105805285 +0000 UTC m=+189.231834868" observedRunningTime="2025-11-25 08:52:09.669136317 +0000 UTC m=+189.795165890" watchObservedRunningTime="2025-11-25 08:52:09.672428816 +0000 UTC m=+189.798458379"
Nov 25 08:52:09 crc kubenswrapper[4932]: I1125 08:52:09.689309 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z826r" podStartSLOduration=1.988326566 podStartE2EDuration="34.689294073s" podCreationTimestamp="2025-11-25 08:51:35 +0000 UTC" firstStartedPulling="2025-11-25 08:51:36.353724231 +0000 UTC m=+156.479753794" lastFinishedPulling="2025-11-25 08:52:09.054691738 +0000 UTC m=+189.180721301" observedRunningTime="2025-11-25 08:52:09.685690184 +0000 UTC m=+189.811719747" watchObservedRunningTime="2025-11-25 08:52:09.689294073 +0000 UTC m=+189.815323636"
Nov 25 08:52:09 crc kubenswrapper[4932]: I1125 08:52:09.703440 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-j99b6" podStartSLOduration=3.052584355 podStartE2EDuration="33.703416649s" podCreationTimestamp="2025-11-25 08:51:36 +0000 UTC" firstStartedPulling="2025-11-25 08:51:38.387615268 +0000 UTC m=+158.513644841" lastFinishedPulling="2025-11-25 08:52:09.038447582 +0000 UTC m=+189.164477135" observedRunningTime="2025-11-25 08:52:09.702582451 +0000 UTC m=+189.828612024" watchObservedRunningTime="2025-11-25 08:52:09.703416649 +0000 UTC m=+189.829446232"
Nov 25 08:52:09 crc kubenswrapper[4932]: I1125 08:52:09.744420 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 08:52:10 crc kubenswrapper[4932]: I1125 08:52:10.987477 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 08:52:11 crc kubenswrapper[4932]: I1125 08:52:11.130794 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/06de2009-cc4c-44d7-a855-eaf54f43b3a8-kube-api-access\") pod \"06de2009-cc4c-44d7-a855-eaf54f43b3a8\" (UID: \"06de2009-cc4c-44d7-a855-eaf54f43b3a8\") "
Nov 25 08:52:11 crc kubenswrapper[4932]: I1125 08:52:11.130847 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/06de2009-cc4c-44d7-a855-eaf54f43b3a8-kubelet-dir\") pod \"06de2009-cc4c-44d7-a855-eaf54f43b3a8\" (UID: \"06de2009-cc4c-44d7-a855-eaf54f43b3a8\") "
Nov 25 08:52:11 crc kubenswrapper[4932]: I1125 08:52:11.131149 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06de2009-cc4c-44d7-a855-eaf54f43b3a8-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "06de2009-cc4c-44d7-a855-eaf54f43b3a8" (UID: "06de2009-cc4c-44d7-a855-eaf54f43b3a8"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 08:52:11 crc kubenswrapper[4932]: I1125 08:52:11.137394 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06de2009-cc4c-44d7-a855-eaf54f43b3a8-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "06de2009-cc4c-44d7-a855-eaf54f43b3a8" (UID: "06de2009-cc4c-44d7-a855-eaf54f43b3a8"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 08:52:11 crc kubenswrapper[4932]: I1125 08:52:11.231779 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/06de2009-cc4c-44d7-a855-eaf54f43b3a8-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:11 crc kubenswrapper[4932]: I1125 08:52:11.231812 4932 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/06de2009-cc4c-44d7-a855-eaf54f43b3a8-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:11 crc kubenswrapper[4932]: I1125 08:52:11.646680 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"06de2009-cc4c-44d7-a855-eaf54f43b3a8","Type":"ContainerDied","Data":"c9168ff861693a1035c699dca340513f46d78bc2ad38e85d521770eb766d027d"}
Nov 25 08:52:11 crc kubenswrapper[4932]: I1125 08:52:11.646723 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9168ff861693a1035c699dca340513f46d78bc2ad38e85d521770eb766d027d"
Nov 25 08:52:11 crc kubenswrapper[4932]: I1125 08:52:11.646792 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 08:52:13 crc kubenswrapper[4932]: I1125 08:52:13.931698 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:52:13 crc kubenswrapper[4932]: I1125 08:52:13.931960 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:52:14 crc kubenswrapper[4932]: I1125 08:52:14.350005 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:52:14 crc kubenswrapper[4932]: I1125 08:52:14.698781 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-ksq5j"]
Nov 25 08:52:14 crc kubenswrapper[4932]: I1125 08:52:14.713067 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:52:15 crc kubenswrapper[4932]: I1125 08:52:15.338051 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:52:15 crc kubenswrapper[4932]: I1125 08:52:15.338103 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:52:15 crc kubenswrapper[4932]: I1125 08:52:15.379229 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:52:15 crc kubenswrapper[4932]: I1125 08:52:15.711317 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5xtp9"
Nov 25 08:52:15 crc kubenswrapper[4932]: I1125 08:52:15.788521 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:52:15 crc kubenswrapper[4932]: I1125 08:52:15.788582 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:52:15 crc kubenswrapper[4932]: I1125 08:52:15.827949 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:52:15 crc kubenswrapper[4932]: I1125 08:52:15.973244 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x52wc"]
Nov 25 08:52:16 crc kubenswrapper[4932]: I1125 08:52:16.673923 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-x52wc" podUID="e63c0208-0bee-4882-b439-76766480e602" containerName="registry-server" containerID="cri-o://d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034" gracePeriod=2
Nov 25 08:52:16 crc kubenswrapper[4932]: I1125 08:52:16.708962 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:52:16 crc kubenswrapper[4932]: I1125 08:52:16.931150 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-j99b6"
Nov 25 08:52:16 crc kubenswrapper[4932]: I1125 08:52:16.931307 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-j99b6"
Nov 25 08:52:16 crc kubenswrapper[4932]: I1125 08:52:16.980939 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-j99b6"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.121080 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.205016 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e63c0208-0bee-4882-b439-76766480e602-utilities\") pod \"e63c0208-0bee-4882-b439-76766480e602\" (UID: \"e63c0208-0bee-4882-b439-76766480e602\") "
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.205738 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e63c0208-0bee-4882-b439-76766480e602-utilities" (OuterVolumeSpecName: "utilities") pod "e63c0208-0bee-4882-b439-76766480e602" (UID: "e63c0208-0bee-4882-b439-76766480e602"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.305771 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kn6vh\" (UniqueName: \"kubernetes.io/projected/e63c0208-0bee-4882-b439-76766480e602-kube-api-access-kn6vh\") pod \"e63c0208-0bee-4882-b439-76766480e602\" (UID: \"e63c0208-0bee-4882-b439-76766480e602\") "
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.305909 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e63c0208-0bee-4882-b439-76766480e602-catalog-content\") pod \"e63c0208-0bee-4882-b439-76766480e602\" (UID: \"e63c0208-0bee-4882-b439-76766480e602\") "
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.306128 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e63c0208-0bee-4882-b439-76766480e602-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.314408 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e63c0208-0bee-4882-b439-76766480e602-kube-api-access-kn6vh" (OuterVolumeSpecName: "kube-api-access-kn6vh") pod "e63c0208-0bee-4882-b439-76766480e602" (UID: "e63c0208-0bee-4882-b439-76766480e602"). InnerVolumeSpecName "kube-api-access-kn6vh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.374094 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e63c0208-0bee-4882-b439-76766480e602-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e63c0208-0bee-4882-b439-76766480e602" (UID: "e63c0208-0bee-4882-b439-76766480e602"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.407403 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e63c0208-0bee-4882-b439-76766480e602-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.407445 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kn6vh\" (UniqueName: \"kubernetes.io/projected/e63c0208-0bee-4882-b439-76766480e602-kube-api-access-kn6vh\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.680277 4932 generic.go:334] "Generic (PLEG): container finished" podID="e63c0208-0bee-4882-b439-76766480e602" containerID="d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034" exitCode=0
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.680337 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x52wc" event={"ID":"e63c0208-0bee-4882-b439-76766480e602","Type":"ContainerDied","Data":"d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034"}
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.680378 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x52wc" event={"ID":"e63c0208-0bee-4882-b439-76766480e602","Type":"ContainerDied","Data":"7f6c1eb0856efab1e313396cf89bbbb40c87929e784512854d0ff115aafb0c82"}
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.680396 4932 scope.go:117] "RemoveContainer" containerID="d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.680503 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-x52wc"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.700525 4932 scope.go:117] "RemoveContainer" containerID="963532c9ce163c2b40fae2099fe5262813314ffc5ed99b82e8729a183b92d72e"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.708492 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-x52wc"]
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.714134 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-x52wc"]
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.729023 4932 scope.go:117] "RemoveContainer" containerID="974052f58c1f397df2ed42ed095fd7105639bb73ff21f92898cc746edc67e99a"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.732137 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-j99b6"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.755667 4932 scope.go:117] "RemoveContainer" containerID="d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034"
Nov 25 08:52:17 crc kubenswrapper[4932]: E1125 08:52:17.757729 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034\": container with ID starting with d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034 not found: ID does not exist" containerID="d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.757767 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034"} err="failed to get container status \"d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034\": rpc error: code = NotFound desc = could not find container \"d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034\": container with ID starting with d160c304698add0f70d6417fce86310cd0769ab89e3fc0c34bfc234852c76034 not found: ID does not exist"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.757815 4932 scope.go:117] "RemoveContainer" containerID="963532c9ce163c2b40fae2099fe5262813314ffc5ed99b82e8729a183b92d72e"
Nov 25 08:52:17 crc kubenswrapper[4932]: E1125 08:52:17.759572 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"963532c9ce163c2b40fae2099fe5262813314ffc5ed99b82e8729a183b92d72e\": container with ID starting with 963532c9ce163c2b40fae2099fe5262813314ffc5ed99b82e8729a183b92d72e not found: ID does not exist" containerID="963532c9ce163c2b40fae2099fe5262813314ffc5ed99b82e8729a183b92d72e"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.759603 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"963532c9ce163c2b40fae2099fe5262813314ffc5ed99b82e8729a183b92d72e"} err="failed to get container status \"963532c9ce163c2b40fae2099fe5262813314ffc5ed99b82e8729a183b92d72e\": rpc error: code = NotFound desc = could not find container \"963532c9ce163c2b40fae2099fe5262813314ffc5ed99b82e8729a183b92d72e\": container with ID starting with 963532c9ce163c2b40fae2099fe5262813314ffc5ed99b82e8729a183b92d72e not found: ID does not exist"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.759622 4932 scope.go:117] "RemoveContainer" containerID="974052f58c1f397df2ed42ed095fd7105639bb73ff21f92898cc746edc67e99a"
Nov 25 08:52:17 crc kubenswrapper[4932]: E1125 08:52:17.760176 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"974052f58c1f397df2ed42ed095fd7105639bb73ff21f92898cc746edc67e99a\": container with ID starting with 974052f58c1f397df2ed42ed095fd7105639bb73ff21f92898cc746edc67e99a not found: ID does not exist" containerID="974052f58c1f397df2ed42ed095fd7105639bb73ff21f92898cc746edc67e99a"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.760219 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"974052f58c1f397df2ed42ed095fd7105639bb73ff21f92898cc746edc67e99a"} err="failed to get container status \"974052f58c1f397df2ed42ed095fd7105639bb73ff21f92898cc746edc67e99a\": rpc error: code = NotFound desc = could not find container \"974052f58c1f397df2ed42ed095fd7105639bb73ff21f92898cc746edc67e99a\": container with ID starting with 974052f58c1f397df2ed42ed095fd7105639bb73ff21f92898cc746edc67e99a not found: ID does not exist"
Nov 25 08:52:17 crc kubenswrapper[4932]: I1125 08:52:17.773920 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z826r"]
Nov 25 08:52:18 crc kubenswrapper[4932]: I1125 08:52:18.616841 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e63c0208-0bee-4882-b439-76766480e602" path="/var/lib/kubelet/pods/e63c0208-0bee-4882-b439-76766480e602/volumes"
Nov 25 08:52:18 crc kubenswrapper[4932]: I1125 08:52:18.684833 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z826r" podUID="6955d562-1cac-40e0-9a6f-50aa7151560e" containerName="registry-server" containerID="cri-o://c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6" gracePeriod=2
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.114616 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.227431 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7zls\" (UniqueName: \"kubernetes.io/projected/6955d562-1cac-40e0-9a6f-50aa7151560e-kube-api-access-v7zls\") pod \"6955d562-1cac-40e0-9a6f-50aa7151560e\" (UID: \"6955d562-1cac-40e0-9a6f-50aa7151560e\") "
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.227504 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6955d562-1cac-40e0-9a6f-50aa7151560e-utilities\") pod \"6955d562-1cac-40e0-9a6f-50aa7151560e\" (UID: \"6955d562-1cac-40e0-9a6f-50aa7151560e\") "
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.227588 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6955d562-1cac-40e0-9a6f-50aa7151560e-catalog-content\") pod \"6955d562-1cac-40e0-9a6f-50aa7151560e\" (UID: \"6955d562-1cac-40e0-9a6f-50aa7151560e\") "
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.228305 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6955d562-1cac-40e0-9a6f-50aa7151560e-utilities" (OuterVolumeSpecName: "utilities") pod "6955d562-1cac-40e0-9a6f-50aa7151560e" (UID: "6955d562-1cac-40e0-9a6f-50aa7151560e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.232231 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6955d562-1cac-40e0-9a6f-50aa7151560e-kube-api-access-v7zls" (OuterVolumeSpecName: "kube-api-access-v7zls") pod "6955d562-1cac-40e0-9a6f-50aa7151560e" (UID: "6955d562-1cac-40e0-9a6f-50aa7151560e"). InnerVolumeSpecName "kube-api-access-v7zls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.245054 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6955d562-1cac-40e0-9a6f-50aa7151560e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6955d562-1cac-40e0-9a6f-50aa7151560e" (UID: "6955d562-1cac-40e0-9a6f-50aa7151560e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.328839 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6955d562-1cac-40e0-9a6f-50aa7151560e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.328872 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7zls\" (UniqueName: \"kubernetes.io/projected/6955d562-1cac-40e0-9a6f-50aa7151560e-kube-api-access-v7zls\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.328886 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6955d562-1cac-40e0-9a6f-50aa7151560e-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.691405 4932 generic.go:334] "Generic (PLEG): container finished" podID="6955d562-1cac-40e0-9a6f-50aa7151560e" containerID="c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6" exitCode=0
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.691585 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z826r" event={"ID":"6955d562-1cac-40e0-9a6f-50aa7151560e","Type":"ContainerDied","Data":"c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6"}
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.691642 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z826r" event={"ID":"6955d562-1cac-40e0-9a6f-50aa7151560e","Type":"ContainerDied","Data":"c6e8d18606579c1345dd29556cc70a8399da089ff536853220d022470b514a98"}
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.691667 4932 scope.go:117] "RemoveContainer" containerID="c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6"
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.691752 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z826r"
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.713085 4932 scope.go:117] "RemoveContainer" containerID="d5b724cb0743b8b01d0115dcb27b54f20134a21548283b4cc4a7d5ab7de3dd45"
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.723796 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z826r"]
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.727469 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z826r"]
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.741402 4932 scope.go:117] "RemoveContainer" containerID="88bb3701e0e18360efd7b6a9c4d69d99e88976a8dc8a367a485045cf0e6cbb10"
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.753964 4932 scope.go:117] "RemoveContainer" containerID="c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6"
Nov 25 08:52:19 crc kubenswrapper[4932]: E1125 08:52:19.754336 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6\": container with ID starting with c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6 not found: ID does not exist" containerID="c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6"
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.754367 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6"} err="failed to get container status \"c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6\": rpc error: code = NotFound desc = could not find container \"c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6\": container with ID starting with c2d0c556942dd279b743e112f3efcfb446d2999bbb491669aa7ecbd742bc0be6 not found: ID does not exist"
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.754388 4932 scope.go:117] "RemoveContainer" containerID="d5b724cb0743b8b01d0115dcb27b54f20134a21548283b4cc4a7d5ab7de3dd45"
Nov 25 08:52:19 crc kubenswrapper[4932]: E1125 08:52:19.754743 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5b724cb0743b8b01d0115dcb27b54f20134a21548283b4cc4a7d5ab7de3dd45\": container with ID starting with d5b724cb0743b8b01d0115dcb27b54f20134a21548283b4cc4a7d5ab7de3dd45 not found: ID does not exist" containerID="d5b724cb0743b8b01d0115dcb27b54f20134a21548283b4cc4a7d5ab7de3dd45"
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.754786 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5b724cb0743b8b01d0115dcb27b54f20134a21548283b4cc4a7d5ab7de3dd45"} err="failed to get container status \"d5b724cb0743b8b01d0115dcb27b54f20134a21548283b4cc4a7d5ab7de3dd45\": rpc error: code = NotFound desc = could not find container \"d5b724cb0743b8b01d0115dcb27b54f20134a21548283b4cc4a7d5ab7de3dd45\": container with ID starting with d5b724cb0743b8b01d0115dcb27b54f20134a21548283b4cc4a7d5ab7de3dd45 not found: ID does not exist"
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.754819 4932 scope.go:117] "RemoveContainer" containerID="88bb3701e0e18360efd7b6a9c4d69d99e88976a8dc8a367a485045cf0e6cbb10"
Nov 25 08:52:19 crc kubenswrapper[4932]: E1125 08:52:19.755092 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88bb3701e0e18360efd7b6a9c4d69d99e88976a8dc8a367a485045cf0e6cbb10\": container with ID starting with 88bb3701e0e18360efd7b6a9c4d69d99e88976a8dc8a367a485045cf0e6cbb10 not found: ID does not exist" containerID="88bb3701e0e18360efd7b6a9c4d69d99e88976a8dc8a367a485045cf0e6cbb10"
Nov 25 08:52:19 crc kubenswrapper[4932]: I1125 08:52:19.755116 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88bb3701e0e18360efd7b6a9c4d69d99e88976a8dc8a367a485045cf0e6cbb10"} err="failed to get container status \"88bb3701e0e18360efd7b6a9c4d69d99e88976a8dc8a367a485045cf0e6cbb10\": rpc error: code = NotFound desc = could not find container \"88bb3701e0e18360efd7b6a9c4d69d99e88976a8dc8a367a485045cf0e6cbb10\": container with ID starting with 88bb3701e0e18360efd7b6a9c4d69d99e88976a8dc8a367a485045cf0e6cbb10 not found: ID does not exist"
Nov 25 08:52:20 crc kubenswrapper[4932]: I1125 08:52:20.172560 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j99b6"]
Nov 25 08:52:20 crc kubenswrapper[4932]: I1125 08:52:20.618746 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6955d562-1cac-40e0-9a6f-50aa7151560e" path="/var/lib/kubelet/pods/6955d562-1cac-40e0-9a6f-50aa7151560e/volumes"
Nov 25 08:52:20 crc kubenswrapper[4932]: I1125 08:52:20.698935 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-j99b6" podUID="6574608b-c907-4cd1-84b8-6778c893ede3" containerName="registry-server" containerID="cri-o://552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a" gracePeriod=2
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.553591 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j99b6"
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.672455 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpdfv\" (UniqueName: \"kubernetes.io/projected/6574608b-c907-4cd1-84b8-6778c893ede3-kube-api-access-qpdfv\") pod \"6574608b-c907-4cd1-84b8-6778c893ede3\" (UID: \"6574608b-c907-4cd1-84b8-6778c893ede3\") "
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.672603 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6574608b-c907-4cd1-84b8-6778c893ede3-utilities\") pod \"6574608b-c907-4cd1-84b8-6778c893ede3\" (UID: \"6574608b-c907-4cd1-84b8-6778c893ede3\") "
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.672661 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6574608b-c907-4cd1-84b8-6778c893ede3-catalog-content\") pod \"6574608b-c907-4cd1-84b8-6778c893ede3\" (UID: \"6574608b-c907-4cd1-84b8-6778c893ede3\") "
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.673733 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6574608b-c907-4cd1-84b8-6778c893ede3-utilities" (OuterVolumeSpecName: "utilities") pod "6574608b-c907-4cd1-84b8-6778c893ede3" (UID: "6574608b-c907-4cd1-84b8-6778c893ede3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.674102 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6574608b-c907-4cd1-84b8-6778c893ede3-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.682165 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6574608b-c907-4cd1-84b8-6778c893ede3-kube-api-access-qpdfv" (OuterVolumeSpecName: "kube-api-access-qpdfv") pod "6574608b-c907-4cd1-84b8-6778c893ede3" (UID: "6574608b-c907-4cd1-84b8-6778c893ede3"). InnerVolumeSpecName "kube-api-access-qpdfv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.717912 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sfx28" event={"ID":"66bd838b-f358-4404-9c27-00bdffad355e","Type":"ContainerStarted","Data":"5c1e6db1dee77568877b5eebc8318f5ebe6f0015a1aa20a5bd08fe4568d820ce"}
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.720552 4932 generic.go:334] "Generic (PLEG): container finished" podID="6574608b-c907-4cd1-84b8-6778c893ede3" containerID="552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a" exitCode=0
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.720607 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j99b6" event={"ID":"6574608b-c907-4cd1-84b8-6778c893ede3","Type":"ContainerDied","Data":"552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a"}
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.720630 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j99b6"
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.720640 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j99b6" event={"ID":"6574608b-c907-4cd1-84b8-6778c893ede3","Type":"ContainerDied","Data":"ab8aa5fca51b73f0fb5422029bb53c4f9b3eab211f2536ba671aa60761f99506"}
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.720662 4932 scope.go:117] "RemoveContainer" containerID="552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a"
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.739507 4932 scope.go:117] "RemoveContainer" containerID="07fc03d6fb885fc99dbc5dd2b95256b1cfc1c8e8aefd07424e74757bffcde87e"
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.775151 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpdfv\" (UniqueName: \"kubernetes.io/projected/6574608b-c907-4cd1-84b8-6778c893ede3-kube-api-access-qpdfv\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.776251 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6574608b-c907-4cd1-84b8-6778c893ede3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6574608b-c907-4cd1-84b8-6778c893ede3" (UID: "6574608b-c907-4cd1-84b8-6778c893ede3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.807350 4932 scope.go:117] "RemoveContainer" containerID="45610aace31f173ee51c49c9e284813f7e44e2c3f6532f8e67ce9e7fdd4b10c9"
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.819644 4932 scope.go:117] "RemoveContainer" containerID="552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a"
Nov 25 08:52:22 crc kubenswrapper[4932]: E1125 08:52:22.819965 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a\": container with ID starting with 552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a not found: ID does not exist" containerID="552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a"
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.819996 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a"} err="failed to get container status \"552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a\": rpc error: code = NotFound desc = could not find container \"552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a\": container with ID starting with 552a0e34493ef805060abc00fff5c3b3840d865f2f50fbc4d7f2c1de91107e3a not found: ID does not exist"
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.820016 4932 scope.go:117] "RemoveContainer" containerID="07fc03d6fb885fc99dbc5dd2b95256b1cfc1c8e8aefd07424e74757bffcde87e"
Nov 25 08:52:22 crc kubenswrapper[4932]: E1125 08:52:22.820335 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07fc03d6fb885fc99dbc5dd2b95256b1cfc1c8e8aefd07424e74757bffcde87e\": container with ID starting with 07fc03d6fb885fc99dbc5dd2b95256b1cfc1c8e8aefd07424e74757bffcde87e not found: ID does not exist" containerID="07fc03d6fb885fc99dbc5dd2b95256b1cfc1c8e8aefd07424e74757bffcde87e"
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.820358 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07fc03d6fb885fc99dbc5dd2b95256b1cfc1c8e8aefd07424e74757bffcde87e"} err="failed to get container status \"07fc03d6fb885fc99dbc5dd2b95256b1cfc1c8e8aefd07424e74757bffcde87e\": rpc error: code = NotFound desc = could not find container \"07fc03d6fb885fc99dbc5dd2b95256b1cfc1c8e8aefd07424e74757bffcde87e\": container with ID starting with 07fc03d6fb885fc99dbc5dd2b95256b1cfc1c8e8aefd07424e74757bffcde87e not found: ID does not exist"
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.820375 4932 scope.go:117] "RemoveContainer" containerID="45610aace31f173ee51c49c9e284813f7e44e2c3f6532f8e67ce9e7fdd4b10c9"
Nov 25 08:52:22 crc kubenswrapper[4932]: E1125 08:52:22.820587 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45610aace31f173ee51c49c9e284813f7e44e2c3f6532f8e67ce9e7fdd4b10c9\": container with ID starting with 45610aace31f173ee51c49c9e284813f7e44e2c3f6532f8e67ce9e7fdd4b10c9 not found: ID does not exist" containerID="45610aace31f173ee51c49c9e284813f7e44e2c3f6532f8e67ce9e7fdd4b10c9"
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.820615 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45610aace31f173ee51c49c9e284813f7e44e2c3f6532f8e67ce9e7fdd4b10c9"} err="failed to get container status \"45610aace31f173ee51c49c9e284813f7e44e2c3f6532f8e67ce9e7fdd4b10c9\": rpc error: code = NotFound desc = could not find container \"45610aace31f173ee51c49c9e284813f7e44e2c3f6532f8e67ce9e7fdd4b10c9\": container with ID starting with 45610aace31f173ee51c49c9e284813f7e44e2c3f6532f8e67ce9e7fdd4b10c9 not found: ID does not exist"
Nov 25 08:52:22 crc kubenswrapper[4932]: I1125 08:52:22.878130 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6574608b-c907-4cd1-84b8-6778c893ede3-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:23 crc kubenswrapper[4932]: I1125 08:52:23.044888 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j99b6"]
Nov 25 08:52:23 crc kubenswrapper[4932]: I1125 08:52:23.049807 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-j99b6"]
Nov 25 08:52:23 crc kubenswrapper[4932]: I1125 08:52:23.727139 4932 generic.go:334] "Generic (PLEG): container finished" podID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" containerID="d0e49e1d619071ff97438d76cc42144429ed8b0c6aa1080b0cf015c5008a595a" exitCode=0
Nov 25 08:52:23 crc kubenswrapper[4932]: I1125 08:52:23.727173 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjm2s" event={"ID":"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd","Type":"ContainerDied","Data":"d0e49e1d619071ff97438d76cc42144429ed8b0c6aa1080b0cf015c5008a595a"}
Nov 25 08:52:23 crc kubenswrapper[4932]: I1125 08:52:23.730576 4932 generic.go:334] "Generic (PLEG): container finished" podID="66bd838b-f358-4404-9c27-00bdffad355e" containerID="5c1e6db1dee77568877b5eebc8318f5ebe6f0015a1aa20a5bd08fe4568d820ce" exitCode=0
Nov 25 08:52:23 crc kubenswrapper[4932]: I1125 08:52:23.730642 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sfx28" event={"ID":"66bd838b-f358-4404-9c27-00bdffad355e","Type":"ContainerDied","Data":"5c1e6db1dee77568877b5eebc8318f5ebe6f0015a1aa20a5bd08fe4568d820ce"}
Nov 25 08:52:23 crc kubenswrapper[4932]: I1125 08:52:23.735934 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6h2tt" event={"ID":"cec571af-8e80-4ddb-8218-deab898f34cd","Type":"ContainerStarted","Data":"cc969bb6657b01372b1fcd97034cb82de2e4ecca3e9d675c73d7f6b8b9da3904"}
Nov 25 08:52:24 crc kubenswrapper[4932]: I1125 08:52:24.611775 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6574608b-c907-4cd1-84b8-6778c893ede3" path="/var/lib/kubelet/pods/6574608b-c907-4cd1-84b8-6778c893ede3/volumes"
Nov 25 08:52:24 crc kubenswrapper[4932]: I1125 08:52:24.742727 4932 generic.go:334] "Generic (PLEG): container finished" podID="c944be15-8b3b-417f-9640-2c926704f541" containerID="622ab97cae7dffb92fbdfa54c75588b591b1eaf16fb6eb11d106a36f02025702" exitCode=0
Nov 25 08:52:24 crc kubenswrapper[4932]: I1125 08:52:24.742800 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drds8" event={"ID":"c944be15-8b3b-417f-9640-2c926704f541","Type":"ContainerDied","Data":"622ab97cae7dffb92fbdfa54c75588b591b1eaf16fb6eb11d106a36f02025702"}
Nov 25 08:52:24 crc kubenswrapper[4932]: I1125 08:52:24.747264 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sfx28" event={"ID":"66bd838b-f358-4404-9c27-00bdffad355e","Type":"ContainerStarted","Data":"b5a9251f501243d423f59e4d57a0e603aed886f0d591ce2b66b6a0f80c25bf68"}
Nov 25 08:52:24 crc kubenswrapper[4932]: I1125 08:52:24.749994 4932 generic.go:334] "Generic (PLEG): container finished" podID="cec571af-8e80-4ddb-8218-deab898f34cd" containerID="cc969bb6657b01372b1fcd97034cb82de2e4ecca3e9d675c73d7f6b8b9da3904" exitCode=0
Nov 25 08:52:24 crc kubenswrapper[4932]: I1125 08:52:24.750056 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6h2tt" event={"ID":"cec571af-8e80-4ddb-8218-deab898f34cd","Type":"ContainerDied","Data":"cc969bb6657b01372b1fcd97034cb82de2e4ecca3e9d675c73d7f6b8b9da3904"}
Nov 25 08:52:24 crc kubenswrapper[4932]: I1125 08:52:24.754511 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjm2s" event={"ID":"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd","Type":"ContainerStarted","Data":"5a08c6bda3e2c889ee14eb3eaca412d1fa2fbca37f544b3fccf6d99fe3fa3e91"}
Nov 25 08:52:24 crc kubenswrapper[4932]: I1125 08:52:24.776240 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sfx28" podStartSLOduration=1.886221748 podStartE2EDuration="51.776225542s" podCreationTimestamp="2025-11-25 08:51:33 +0000 UTC" firstStartedPulling="2025-11-25 08:51:34.296324214 +0000 UTC m=+154.422353777" lastFinishedPulling="2025-11-25 08:52:24.186327998 +0000 UTC m=+204.312357571" observedRunningTime="2025-11-25 08:52:24.77445517 +0000 UTC m=+204.900484733" watchObservedRunningTime="2025-11-25 08:52:24.776225542 +0000 UTC m=+204.902255105"
Nov 25 08:52:24 crc kubenswrapper[4932]: I1125 08:52:24.789849 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tjm2s" podStartSLOduration=1.941759578 podStartE2EDuration="51.789835416s" podCreationTimestamp="2025-11-25 08:51:33 +0000 UTC" firstStartedPulling="2025-11-25 08:51:34.300037042 +0000 UTC m=+154.426066605" lastFinishedPulling="2025-11-25 08:52:24.14811286 +0000 UTC m=+204.274142443" observedRunningTime="2025-11-25 08:52:24.789157227 +0000 UTC m=+204.915186790" watchObservedRunningTime="2025-11-25 08:52:24.789835416 +0000 UTC m=+204.915864979"
Nov 25 08:52:25 crc kubenswrapper[4932]: I1125 08:52:25.761921 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6h2tt" event={"ID":"cec571af-8e80-4ddb-8218-deab898f34cd","Type":"ContainerStarted","Data":"5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18"}
Nov 25 08:52:25 crc kubenswrapper[4932]: I1125 08:52:25.764732 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drds8" event={"ID":"c944be15-8b3b-417f-9640-2c926704f541","Type":"ContainerStarted","Data":"5d4c63c0a497f5c34d7d411e410cb59c4a0af17936b2c4ed2b98ce109272d285"}
Nov 25 08:52:25 crc kubenswrapper[4932]: I1125 08:52:25.781004 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6h2tt" podStartSLOduration=2.004560237 podStartE2EDuration="49.780988373s" podCreationTimestamp="2025-11-25 08:51:36 +0000 UTC" firstStartedPulling="2025-11-25 08:51:37.375668844 +0000 UTC m=+157.501698407" lastFinishedPulling="2025-11-25 08:52:25.15209698 +0000 UTC m=+205.278126543" observedRunningTime="2025-11-25 08:52:25.778639925 +0000 UTC m=+205.904669498" watchObservedRunningTime="2025-11-25 08:52:25.780988373 +0000 UTC m=+205.907017936"
Nov 25 08:52:25 crc kubenswrapper[4932]: I1125 08:52:25.798027 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-drds8" podStartSLOduration=1.923702773 podStartE2EDuration="52.798008476s" podCreationTimestamp="2025-11-25 08:51:33 +0000 UTC" firstStartedPulling="2025-11-25 08:51:34.304495821 +0000 UTC m=+154.430525384" lastFinishedPulling="2025-11-25 08:52:25.178801514 +0000 UTC m=+205.304831087" observedRunningTime="2025-11-25 08:52:25.795084202 +0000 UTC m=+205.921113765" watchObservedRunningTime="2025-11-25 08:52:25.798008476 +0000 UTC m=+205.924038039"
Nov 25 08:52:26 crc kubenswrapper[4932]: I1125 08:52:26.538575 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6h2tt"
Nov 25 08:52:26 crc kubenswrapper[4932]: I1125 08:52:26.538625 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6h2tt"
Nov 25 08:52:27 crc kubenswrapper[4932]: I1125 08:52:27.582347 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6h2tt" podUID="cec571af-8e80-4ddb-8218-deab898f34cd" containerName="registry-server" probeResult="failure" output=<
Nov 25 08:52:27 crc kubenswrapper[4932]: timeout: failed to connect service ":50051" within 1s
Nov 25 08:52:27 crc kubenswrapper[4932]: >
Nov 25 08:52:33 crc kubenswrapper[4932]: I1125 08:52:33.356589 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tjm2s"
Nov 25 08:52:33 crc kubenswrapper[4932]: I1125 08:52:33.357344 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tjm2s"
Nov 25 08:52:33 crc kubenswrapper[4932]: I1125 08:52:33.426937 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tjm2s"
Nov 25 08:52:33 crc kubenswrapper[4932]: I1125 08:52:33.527139 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-drds8"
Nov 25 08:52:33 crc kubenswrapper[4932]: I1125 08:52:33.528019 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-drds8"
Nov 25 08:52:33 crc kubenswrapper[4932]: I1125 08:52:33.569985 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-drds8"
Nov 25 08:52:33 crc kubenswrapper[4932]: I1125 08:52:33.735069 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:52:33 crc kubenswrapper[4932]: I1125 08:52:33.735129 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:52:33 crc kubenswrapper[4932]: I1125 08:52:33.779153 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:52:33 crc kubenswrapper[4932]: I1125 08:52:33.844342 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-drds8"
Nov 25 08:52:33 crc kubenswrapper[4932]: I1125 08:52:33.859848 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tjm2s"
Nov 25 08:52:33 crc kubenswrapper[4932]: I1125 08:52:33.860597 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:52:34 crc kubenswrapper[4932]: I1125 08:52:34.971509 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sfx28"]
Nov 25 08:52:35 crc kubenswrapper[4932]: I1125 08:52:35.819953 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sfx28" podUID="66bd838b-f358-4404-9c27-00bdffad355e" containerName="registry-server" containerID="cri-o://b5a9251f501243d423f59e4d57a0e603aed886f0d591ce2b66b6a0f80c25bf68" gracePeriod=2
Nov 25 08:52:36 crc kubenswrapper[4932]: I1125 08:52:36.587724 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6h2tt"
Nov 25 08:52:36 crc kubenswrapper[4932]: I1125 08:52:36.649208 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6h2tt"
Nov 25 08:52:37 crc kubenswrapper[4932]: I1125 08:52:37.181634 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 08:52:37 crc kubenswrapper[4932]: I1125 08:52:37.181703 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 08:52:37 crc kubenswrapper[4932]: I1125 08:52:37.181755 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh"
Nov 25 08:52:37 crc kubenswrapper[4932]: I1125 08:52:37.182411 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 08:52:37 crc kubenswrapper[4932]: I1125 08:52:37.182484 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177" gracePeriod=600
Nov 25 08:52:38 crc kubenswrapper[4932]: I1125 08:52:38.846718 4932 generic.go:334] "Generic (PLEG): container finished" podID="66bd838b-f358-4404-9c27-00bdffad355e" containerID="b5a9251f501243d423f59e4d57a0e603aed886f0d591ce2b66b6a0f80c25bf68" exitCode=0
Nov 25 08:52:38 crc kubenswrapper[4932]: I1125 08:52:38.847455 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sfx28" event={"ID":"66bd838b-f358-4404-9c27-00bdffad355e","Type":"ContainerDied","Data":"b5a9251f501243d423f59e4d57a0e603aed886f0d591ce2b66b6a0f80c25bf68"}
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.339303 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.357921 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66bd838b-f358-4404-9c27-00bdffad355e-utilities\") pod \"66bd838b-f358-4404-9c27-00bdffad355e\" (UID: \"66bd838b-f358-4404-9c27-00bdffad355e\") "
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.358117 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66bd838b-f358-4404-9c27-00bdffad355e-catalog-content\") pod \"66bd838b-f358-4404-9c27-00bdffad355e\" (UID: \"66bd838b-f358-4404-9c27-00bdffad355e\") "
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.358153 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7f6bl\" (UniqueName: \"kubernetes.io/projected/66bd838b-f358-4404-9c27-00bdffad355e-kube-api-access-7f6bl\") pod \"66bd838b-f358-4404-9c27-00bdffad355e\" (UID: \"66bd838b-f358-4404-9c27-00bdffad355e\") "
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.358819 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66bd838b-f358-4404-9c27-00bdffad355e-utilities" (OuterVolumeSpecName: "utilities") pod "66bd838b-f358-4404-9c27-00bdffad355e" (UID: "66bd838b-f358-4404-9c27-00bdffad355e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.372132 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66bd838b-f358-4404-9c27-00bdffad355e-kube-api-access-7f6bl" (OuterVolumeSpecName: "kube-api-access-7f6bl") pod "66bd838b-f358-4404-9c27-00bdffad355e" (UID: "66bd838b-f358-4404-9c27-00bdffad355e"). InnerVolumeSpecName "kube-api-access-7f6bl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.424830 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66bd838b-f358-4404-9c27-00bdffad355e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "66bd838b-f358-4404-9c27-00bdffad355e" (UID: "66bd838b-f358-4404-9c27-00bdffad355e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.459791 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/66bd838b-f358-4404-9c27-00bdffad355e-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.460181 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/66bd838b-f358-4404-9c27-00bdffad355e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.460227 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7f6bl\" (UniqueName: \"kubernetes.io/projected/66bd838b-f358-4404-9c27-00bdffad355e-kube-api-access-7f6bl\") on node \"crc\" DevicePath \"\""
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.726676 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" podUID="21fffc77-e724-4f48-ac20-f21104224241" containerName="oauth-openshift" containerID="cri-o://ed412641e3d1177ff3c499dcd0153ab70574d8210910cb45c9dcdae44f69e33f" gracePeriod=15
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.855685 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177" exitCode=0
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.855741 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177"}
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.864478 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sfx28" event={"ID":"66bd838b-f358-4404-9c27-00bdffad355e","Type":"ContainerDied","Data":"e24fe6a16c69ecf6bea925bddcbbee721e35aa8261c8048dae2a70aab496df72"}
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.864543 4932 scope.go:117] "RemoveContainer" containerID="b5a9251f501243d423f59e4d57a0e603aed886f0d591ce2b66b6a0f80c25bf68"
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.864683 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sfx28"
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.888843 4932 scope.go:117] "RemoveContainer" containerID="5c1e6db1dee77568877b5eebc8318f5ebe6f0015a1aa20a5bd08fe4568d820ce"
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.899322 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sfx28"]
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.902356 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sfx28"]
Nov 25 08:52:39 crc kubenswrapper[4932]: I1125 08:52:39.925835 4932 scope.go:117] "RemoveContainer" containerID="e3c5cafcb63ec5a271698a7785c1ab63dd2a9ebaa3bf1c2f49bf0f03a1d2c13f"
Nov 25 08:52:40 crc kubenswrapper[4932]: I1125 08:52:40.614747 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66bd838b-f358-4404-9c27-00bdffad355e" path="/var/lib/kubelet/pods/66bd838b-f358-4404-9c27-00bdffad355e/volumes"
Nov 25 08:52:40 crc kubenswrapper[4932]: I1125 08:52:40.873305 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"1be3115c0f5e9ad2a9965b1d4ca20e00cbc062a1f0c3346ce11cc851d5ae811c"}
Nov 25 08:52:40 crc kubenswrapper[4932]: I1125 08:52:40.876502 4932 generic.go:334] "Generic (PLEG): container finished" podID="21fffc77-e724-4f48-ac20-f21104224241" containerID="ed412641e3d1177ff3c499dcd0153ab70574d8210910cb45c9dcdae44f69e33f" exitCode=0
Nov 25 08:52:40 crc kubenswrapper[4932]: I1125 08:52:40.876554 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" event={"ID":"21fffc77-e724-4f48-ac20-f21104224241","Type":"ContainerDied","Data":"ed412641e3d1177ff3c499dcd0153ab70574d8210910cb45c9dcdae44f69e33f"}
Nov 25 08:52:40 crc kubenswrapper[4932]: I1125 08:52:40.876586 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" event={"ID":"21fffc77-e724-4f48-ac20-f21104224241","Type":"ContainerDied","Data":"3c6b19f86770ffbae1cfee8c26285e3e2087928a80f70f4d681c7781ee826fb1"}
Nov 25 08:52:40 crc kubenswrapper[4932]: I1125 08:52:40.876601 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3c6b19f86770ffbae1cfee8c26285e3e2087928a80f70f4d681c7781ee826fb1"
Nov 25 08:52:40 crc kubenswrapper[4932]: I1125 08:52:40.883953 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j"
Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.081557 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-login\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") "
Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.081704 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-router-certs\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") "
Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.081742 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-ocp-branding-template\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") "
Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.081787 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-error\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") "
Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.081841 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-audit-policies\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") "
Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.081918 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-service-ca\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") "
Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.081958 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-session\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") "
Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.082013 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-provider-selection\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") "
Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.082054 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-cliconfig\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") "
Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.082102 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-trusted-ca-bundle\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.082144 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/21fffc77-e724-4f48-ac20-f21104224241-audit-dir\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.082224 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-serving-cert\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.082262 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khl2w\" (UniqueName: \"kubernetes.io/projected/21fffc77-e724-4f48-ac20-f21104224241-kube-api-access-khl2w\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.082307 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-idp-0-file-data\") pod \"21fffc77-e724-4f48-ac20-f21104224241\" (UID: \"21fffc77-e724-4f48-ac20-f21104224241\") " Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.082980 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/21fffc77-e724-4f48-ac20-f21104224241-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.083763 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.083830 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.084416 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.084617 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.088245 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.089371 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.090174 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.091691 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.092033 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.094534 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.094570 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21fffc77-e724-4f48-ac20-f21104224241-kube-api-access-khl2w" (OuterVolumeSpecName: "kube-api-access-khl2w") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "kube-api-access-khl2w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.094834 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.094965 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "21fffc77-e724-4f48-ac20-f21104224241" (UID: "21fffc77-e724-4f48-ac20-f21104224241"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183302 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183347 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183364 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183377 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183390 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183403 4932 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/21fffc77-e724-4f48-ac20-f21104224241-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183415 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183428 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khl2w\" (UniqueName: \"kubernetes.io/projected/21fffc77-e724-4f48-ac20-f21104224241-kube-api-access-khl2w\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183440 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183450 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183462 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183484 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183511 4932 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/21fffc77-e724-4f48-ac20-f21104224241-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.183527 4932 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/21fffc77-e724-4f48-ac20-f21104224241-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.885415 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-ksq5j" Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.954263 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-ksq5j"] Nov 25 08:52:41 crc kubenswrapper[4932]: I1125 08:52:41.961692 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-ksq5j"] Nov 25 08:52:42 crc kubenswrapper[4932]: I1125 08:52:42.611277 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21fffc77-e724-4f48-ac20-f21104224241" path="/var/lib/kubelet/pods/21fffc77-e724-4f48-ac20-f21104224241/volumes" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.167664 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-857d94f549-crnsf"] Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.168659 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6955d562-1cac-40e0-9a6f-50aa7151560e" containerName="registry-server" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.168682 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6955d562-1cac-40e0-9a6f-50aa7151560e" containerName="registry-server" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.168706 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e63c0208-0bee-4882-b439-76766480e602" containerName="registry-server" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.168719 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e63c0208-0bee-4882-b439-76766480e602" containerName="registry-server" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.168737 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6574608b-c907-4cd1-84b8-6778c893ede3" containerName="extract-content" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.168750 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6574608b-c907-4cd1-84b8-6778c893ede3" containerName="extract-content" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.168765 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6955d562-1cac-40e0-9a6f-50aa7151560e" containerName="extract-content" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.168776 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6955d562-1cac-40e0-9a6f-50aa7151560e" containerName="extract-content" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.168792 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66bd838b-f358-4404-9c27-00bdffad355e" containerName="extract-utilities" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.168803 
4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="66bd838b-f358-4404-9c27-00bdffad355e" containerName="extract-utilities" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.168819 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6574608b-c907-4cd1-84b8-6778c893ede3" containerName="extract-utilities" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.168831 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6574608b-c907-4cd1-84b8-6778c893ede3" containerName="extract-utilities" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.168845 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66bd838b-f358-4404-9c27-00bdffad355e" containerName="registry-server" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.168858 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="66bd838b-f358-4404-9c27-00bdffad355e" containerName="registry-server" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.168879 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e63c0208-0bee-4882-b439-76766480e602" containerName="extract-content" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.168891 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e63c0208-0bee-4882-b439-76766480e602" containerName="extract-content" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.168909 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e63c0208-0bee-4882-b439-76766480e602" containerName="extract-utilities" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.168921 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e63c0208-0bee-4882-b439-76766480e602" containerName="extract-utilities" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.168935 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6955d562-1cac-40e0-9a6f-50aa7151560e" containerName="extract-utilities" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.168948 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6955d562-1cac-40e0-9a6f-50aa7151560e" containerName="extract-utilities" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.168964 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66bd838b-f358-4404-9c27-00bdffad355e" containerName="extract-content" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.168975 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="66bd838b-f358-4404-9c27-00bdffad355e" containerName="extract-content" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.168992 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6574608b-c907-4cd1-84b8-6778c893ede3" containerName="registry-server" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.169005 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6574608b-c907-4cd1-84b8-6778c893ede3" containerName="registry-server" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.169020 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21fffc77-e724-4f48-ac20-f21104224241" containerName="oauth-openshift" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.169031 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="21fffc77-e724-4f48-ac20-f21104224241" containerName="oauth-openshift" Nov 25 08:52:50 crc kubenswrapper[4932]: E1125 08:52:50.169049 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06de2009-cc4c-44d7-a855-eaf54f43b3a8" containerName="pruner" Nov 25 08:52:50 crc 
kubenswrapper[4932]: I1125 08:52:50.169060 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="06de2009-cc4c-44d7-a855-eaf54f43b3a8" containerName="pruner" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.169267 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="6574608b-c907-4cd1-84b8-6778c893ede3" containerName="registry-server" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.169296 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="6955d562-1cac-40e0-9a6f-50aa7151560e" containerName="registry-server" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.169310 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="06de2009-cc4c-44d7-a855-eaf54f43b3a8" containerName="pruner" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.169329 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="21fffc77-e724-4f48-ac20-f21104224241" containerName="oauth-openshift" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.169342 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="66bd838b-f358-4404-9c27-00bdffad355e" containerName="registry-server" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.169362 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e63c0208-0bee-4882-b439-76766480e602" containerName="registry-server" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.169967 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.172296 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.172798 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.173601 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.173727 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.173982 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.175308 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.179131 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.180132 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.180473 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.180726 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.180882 4932 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.181903 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.192761 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.195824 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-857d94f549-crnsf"] Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.200360 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.208688 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.288063 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zvf2\" (UniqueName: \"kubernetes.io/projected/b156e019-d41d-4f3f-9824-bab177bc139f-kube-api-access-4zvf2\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.288134 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-router-certs\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.288245 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.289031 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.289131 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-session\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.289173 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" 
(UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-user-template-error\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.289265 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.289319 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b156e019-d41d-4f3f-9824-bab177bc139f-audit-dir\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.289427 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.289624 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-service-ca\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.289719 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.289775 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b156e019-d41d-4f3f-9824-bab177bc139f-audit-policies\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.289828 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.289953 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-user-template-login\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.390692 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zvf2\" (UniqueName: \"kubernetes.io/projected/b156e019-d41d-4f3f-9824-bab177bc139f-kube-api-access-4zvf2\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.390763 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-router-certs\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.390809 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.390843 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.390877 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-session\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.390923 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-user-template-error\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.390959 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.391003 
4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b156e019-d41d-4f3f-9824-bab177bc139f-audit-dir\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.391046 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.391094 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-service-ca\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.391138 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.391171 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b156e019-d41d-4f3f-9824-bab177bc139f-audit-policies\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.391249 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.391337 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-user-template-login\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.392030 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/b156e019-d41d-4f3f-9824-bab177bc139f-audit-dir\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.392865 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-service-ca\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.393356 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/b156e019-d41d-4f3f-9824-bab177bc139f-audit-policies\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.393914 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.394049 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-cliconfig\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.399715 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-user-template-error\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.400016 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-session\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.400229 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.400720 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.400911 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-user-template-login\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.401102 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.401392 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-router-certs\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.402662 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/b156e019-d41d-4f3f-9824-bab177bc139f-v4-0-config-system-serving-cert\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.416768 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zvf2\" (UniqueName: \"kubernetes.io/projected/b156e019-d41d-4f3f-9824-bab177bc139f-kube-api-access-4zvf2\") pod \"oauth-openshift-857d94f549-crnsf\" (UID: \"b156e019-d41d-4f3f-9824-bab177bc139f\") " pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.504846 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:50 crc kubenswrapper[4932]: I1125 08:52:50.961369 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-857d94f549-crnsf"] Nov 25 08:52:51 crc kubenswrapper[4932]: I1125 08:52:51.953855 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" event={"ID":"b156e019-d41d-4f3f-9824-bab177bc139f","Type":"ContainerStarted","Data":"b9c7baf8e2158af5ed81f7ff73c8588631a2e03dd7fb46182d91145119e4c510"} Nov 25 08:52:51 crc kubenswrapper[4932]: I1125 08:52:51.955091 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" event={"ID":"b156e019-d41d-4f3f-9824-bab177bc139f","Type":"ContainerStarted","Data":"e4760a9b92c61bfde531a81b4b55b14f3a5097e9e587bec899e643dc0a13c8e0"} Nov 25 08:52:51 crc kubenswrapper[4932]: I1125 08:52:51.955373 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:51 crc kubenswrapper[4932]: I1125 08:52:51.962397 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" Nov 25 08:52:52 crc kubenswrapper[4932]: I1125 08:52:52.007136 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-857d94f549-crnsf" podStartSLOduration=38.007108953 podStartE2EDuration="38.007108953s" podCreationTimestamp="2025-11-25 08:52:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:52:51.980213583 +0000 UTC m=+232.106243166" watchObservedRunningTime="2025-11-25 08:52:52.007108953 +0000 UTC m=+232.133138516" Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.423174 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tjm2s"] Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.424076 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tjm2s" podUID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" containerName="registry-server" containerID="cri-o://5a08c6bda3e2c889ee14eb3eaca412d1fa2fbca37f544b3fccf6d99fe3fa3e91" gracePeriod=30 Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.430992 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-drds8"] Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.431305 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-drds8" podUID="c944be15-8b3b-417f-9640-2c926704f541" containerName="registry-server" containerID="cri-o://5d4c63c0a497f5c34d7d411e410cb59c4a0af17936b2c4ed2b98ce109272d285" gracePeriod=30 Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.451402 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ktv2t"] Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.451854 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" podUID="3776cbbe-bbc8-430b-8db5-881918c75fb2" containerName="marketplace-operator" 
containerID="cri-o://3425b5f3e5ca8a75d9f08346140291dd8574ad31ccd41e487ceb940a9486a14e" gracePeriod=30 Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.463163 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xtp9"] Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.463472 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5xtp9" podUID="df33346c-a298-4a78-b566-70b2a11eb307" containerName="registry-server" containerID="cri-o://cdc5feabd39c2462aafceb81ecc86a2e669b7f0102a03c45887bfb8d794feee3" gracePeriod=30 Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.467490 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-92hkm"] Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.468244 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.472070 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6h2tt"] Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.472329 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6h2tt" podUID="cec571af-8e80-4ddb-8218-deab898f34cd" containerName="registry-server" containerID="cri-o://5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18" gracePeriod=30 Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.477975 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-92hkm"] Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.634540 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jktkp\" (UniqueName: \"kubernetes.io/projected/e5f276d9-ee2f-45e0-9048-034b3212a733-kube-api-access-jktkp\") pod \"marketplace-operator-79b997595-92hkm\" (UID: \"e5f276d9-ee2f-45e0-9048-034b3212a733\") " pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.634603 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e5f276d9-ee2f-45e0-9048-034b3212a733-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-92hkm\" (UID: \"e5f276d9-ee2f-45e0-9048-034b3212a733\") " pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.634632 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e5f276d9-ee2f-45e0-9048-034b3212a733-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-92hkm\" (UID: \"e5f276d9-ee2f-45e0-9048-034b3212a733\") " pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.735552 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jktkp\" (UniqueName: \"kubernetes.io/projected/e5f276d9-ee2f-45e0-9048-034b3212a733-kube-api-access-jktkp\") pod \"marketplace-operator-79b997595-92hkm\" (UID: \"e5f276d9-ee2f-45e0-9048-034b3212a733\") " pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 
08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.735623 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e5f276d9-ee2f-45e0-9048-034b3212a733-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-92hkm\" (UID: \"e5f276d9-ee2f-45e0-9048-034b3212a733\") " pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.735651 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e5f276d9-ee2f-45e0-9048-034b3212a733-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-92hkm\" (UID: \"e5f276d9-ee2f-45e0-9048-034b3212a733\") " pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.737274 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e5f276d9-ee2f-45e0-9048-034b3212a733-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-92hkm\" (UID: \"e5f276d9-ee2f-45e0-9048-034b3212a733\") " pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.745060 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e5f276d9-ee2f-45e0-9048-034b3212a733-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-92hkm\" (UID: \"e5f276d9-ee2f-45e0-9048-034b3212a733\") " pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.751702 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jktkp\" (UniqueName: \"kubernetes.io/projected/e5f276d9-ee2f-45e0-9048-034b3212a733-kube-api-access-jktkp\") pod \"marketplace-operator-79b997595-92hkm\" (UID: \"e5f276d9-ee2f-45e0-9048-034b3212a733\") " pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 08:53:11 crc kubenswrapper[4932]: I1125 08:53:11.786794 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.007545 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6h2tt" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.060386 4932 generic.go:334] "Generic (PLEG): container finished" podID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" containerID="5a08c6bda3e2c889ee14eb3eaca412d1fa2fbca37f544b3fccf6d99fe3fa3e91" exitCode=0 Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.060455 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjm2s" event={"ID":"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd","Type":"ContainerDied","Data":"5a08c6bda3e2c889ee14eb3eaca412d1fa2fbca37f544b3fccf6d99fe3fa3e91"} Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.062502 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5xtp9" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.067353 4932 generic.go:334] "Generic (PLEG): container finished" podID="df33346c-a298-4a78-b566-70b2a11eb307" containerID="cdc5feabd39c2462aafceb81ecc86a2e669b7f0102a03c45887bfb8d794feee3" exitCode=0 Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.067411 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xtp9" event={"ID":"df33346c-a298-4a78-b566-70b2a11eb307","Type":"ContainerDied","Data":"cdc5feabd39c2462aafceb81ecc86a2e669b7f0102a03c45887bfb8d794feee3"} Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.067447 4932 scope.go:117] "RemoveContainer" containerID="cdc5feabd39c2462aafceb81ecc86a2e669b7f0102a03c45887bfb8d794feee3" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.086305 4932 generic.go:334] "Generic (PLEG): container finished" podID="c944be15-8b3b-417f-9640-2c926704f541" containerID="5d4c63c0a497f5c34d7d411e410cb59c4a0af17936b2c4ed2b98ce109272d285" exitCode=0 Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.086371 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drds8" event={"ID":"c944be15-8b3b-417f-9640-2c926704f541","Type":"ContainerDied","Data":"5d4c63c0a497f5c34d7d411e410cb59c4a0af17936b2c4ed2b98ce109272d285"} Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.090262 4932 generic.go:334] "Generic (PLEG): container finished" podID="3776cbbe-bbc8-430b-8db5-881918c75fb2" containerID="3425b5f3e5ca8a75d9f08346140291dd8574ad31ccd41e487ceb940a9486a14e" exitCode=0 Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.090315 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" event={"ID":"3776cbbe-bbc8-430b-8db5-881918c75fb2","Type":"ContainerDied","Data":"3425b5f3e5ca8a75d9f08346140291dd8574ad31ccd41e487ceb940a9486a14e"} Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.092074 4932 generic.go:334] "Generic (PLEG): container finished" podID="cec571af-8e80-4ddb-8218-deab898f34cd" containerID="5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18" exitCode=0 Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.092212 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6h2tt" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.092259 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6h2tt" event={"ID":"cec571af-8e80-4ddb-8218-deab898f34cd","Type":"ContainerDied","Data":"5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18"} Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.092312 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6h2tt" event={"ID":"cec571af-8e80-4ddb-8218-deab898f34cd","Type":"ContainerDied","Data":"9eea03411956af59407bf9215d231de939a25b415eb4088d2a46e8514098b63d"} Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.102005 4932 scope.go:117] "RemoveContainer" containerID="0e70554b4e33e91772ff1d1af9dea6a78541c9cf1f3cc6fabaadb2a7745ecc9a" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.124898 4932 scope.go:117] "RemoveContainer" containerID="4fa7771dc0a83053f45aa0de64c44b226d77f5ddc4a00078517ab63749e772d8" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.139559 4932 scope.go:117] "RemoveContainer" containerID="5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.139812 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cec571af-8e80-4ddb-8218-deab898f34cd-utilities\") pod \"cec571af-8e80-4ddb-8218-deab898f34cd\" (UID: \"cec571af-8e80-4ddb-8218-deab898f34cd\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.139846 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cec571af-8e80-4ddb-8218-deab898f34cd-catalog-content\") pod \"cec571af-8e80-4ddb-8218-deab898f34cd\" (UID: \"cec571af-8e80-4ddb-8218-deab898f34cd\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.139919 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mncxn\" (UniqueName: \"kubernetes.io/projected/cec571af-8e80-4ddb-8218-deab898f34cd-kube-api-access-mncxn\") pod \"cec571af-8e80-4ddb-8218-deab898f34cd\" (UID: \"cec571af-8e80-4ddb-8218-deab898f34cd\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.140761 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cec571af-8e80-4ddb-8218-deab898f34cd-utilities" (OuterVolumeSpecName: "utilities") pod "cec571af-8e80-4ddb-8218-deab898f34cd" (UID: "cec571af-8e80-4ddb-8218-deab898f34cd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.144642 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cec571af-8e80-4ddb-8218-deab898f34cd-kube-api-access-mncxn" (OuterVolumeSpecName: "kube-api-access-mncxn") pod "cec571af-8e80-4ddb-8218-deab898f34cd" (UID: "cec571af-8e80-4ddb-8218-deab898f34cd"). InnerVolumeSpecName "kube-api-access-mncxn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.162984 4932 scope.go:117] "RemoveContainer" containerID="cc969bb6657b01372b1fcd97034cb82de2e4ecca3e9d675c73d7f6b8b9da3904" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.185980 4932 scope.go:117] "RemoveContainer" containerID="18413151967a1c9c3ba8ec6f80b50cd938602707e79267a9b2a6be081a2a83c2" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.202728 4932 scope.go:117] "RemoveContainer" containerID="5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18" Nov 25 08:53:12 crc kubenswrapper[4932]: E1125 08:53:12.203449 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18\": container with ID starting with 5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18 not found: ID does not exist" containerID="5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.203505 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18"} err="failed to get container status \"5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18\": rpc error: code = NotFound desc = could not find container \"5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18\": container with ID starting with 5ac4d416e307501f500da11051d50b67c8580d561295a5c15af91db086298c18 not found: ID does not exist" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.203535 4932 scope.go:117] "RemoveContainer" containerID="cc969bb6657b01372b1fcd97034cb82de2e4ecca3e9d675c73d7f6b8b9da3904" Nov 25 08:53:12 crc kubenswrapper[4932]: E1125 08:53:12.203983 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc969bb6657b01372b1fcd97034cb82de2e4ecca3e9d675c73d7f6b8b9da3904\": container with ID starting with cc969bb6657b01372b1fcd97034cb82de2e4ecca3e9d675c73d7f6b8b9da3904 not found: ID does not exist" containerID="cc969bb6657b01372b1fcd97034cb82de2e4ecca3e9d675c73d7f6b8b9da3904" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.204019 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc969bb6657b01372b1fcd97034cb82de2e4ecca3e9d675c73d7f6b8b9da3904"} err="failed to get container status \"cc969bb6657b01372b1fcd97034cb82de2e4ecca3e9d675c73d7f6b8b9da3904\": rpc error: code = NotFound desc = could not find container \"cc969bb6657b01372b1fcd97034cb82de2e4ecca3e9d675c73d7f6b8b9da3904\": container with ID starting with cc969bb6657b01372b1fcd97034cb82de2e4ecca3e9d675c73d7f6b8b9da3904 not found: ID does not exist" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.204052 4932 scope.go:117] "RemoveContainer" containerID="18413151967a1c9c3ba8ec6f80b50cd938602707e79267a9b2a6be081a2a83c2" Nov 25 08:53:12 crc kubenswrapper[4932]: E1125 08:53:12.204277 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18413151967a1c9c3ba8ec6f80b50cd938602707e79267a9b2a6be081a2a83c2\": container with ID starting with 18413151967a1c9c3ba8ec6f80b50cd938602707e79267a9b2a6be081a2a83c2 not found: ID does not exist" containerID="18413151967a1c9c3ba8ec6f80b50cd938602707e79267a9b2a6be081a2a83c2" Nov 25 08:53:12 crc 
kubenswrapper[4932]: I1125 08:53:12.204389 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18413151967a1c9c3ba8ec6f80b50cd938602707e79267a9b2a6be081a2a83c2"} err="failed to get container status \"18413151967a1c9c3ba8ec6f80b50cd938602707e79267a9b2a6be081a2a83c2\": rpc error: code = NotFound desc = could not find container \"18413151967a1c9c3ba8ec6f80b50cd938602707e79267a9b2a6be081a2a83c2\": container with ID starting with 18413151967a1c9c3ba8ec6f80b50cd938602707e79267a9b2a6be081a2a83c2 not found: ID does not exist" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.223227 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-92hkm"] Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.242136 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df33346c-a298-4a78-b566-70b2a11eb307-utilities\") pod \"df33346c-a298-4a78-b566-70b2a11eb307\" (UID: \"df33346c-a298-4a78-b566-70b2a11eb307\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.242278 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df33346c-a298-4a78-b566-70b2a11eb307-catalog-content\") pod \"df33346c-a298-4a78-b566-70b2a11eb307\" (UID: \"df33346c-a298-4a78-b566-70b2a11eb307\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.242327 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hx5l6\" (UniqueName: \"kubernetes.io/projected/df33346c-a298-4a78-b566-70b2a11eb307-kube-api-access-hx5l6\") pod \"df33346c-a298-4a78-b566-70b2a11eb307\" (UID: \"df33346c-a298-4a78-b566-70b2a11eb307\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.242576 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mncxn\" (UniqueName: \"kubernetes.io/projected/cec571af-8e80-4ddb-8218-deab898f34cd-kube-api-access-mncxn\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.242673 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cec571af-8e80-4ddb-8218-deab898f34cd-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.244767 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df33346c-a298-4a78-b566-70b2a11eb307-utilities" (OuterVolumeSpecName: "utilities") pod "df33346c-a298-4a78-b566-70b2a11eb307" (UID: "df33346c-a298-4a78-b566-70b2a11eb307"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.247990 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cec571af-8e80-4ddb-8218-deab898f34cd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cec571af-8e80-4ddb-8218-deab898f34cd" (UID: "cec571af-8e80-4ddb-8218-deab898f34cd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.248367 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df33346c-a298-4a78-b566-70b2a11eb307-kube-api-access-hx5l6" (OuterVolumeSpecName: "kube-api-access-hx5l6") pod "df33346c-a298-4a78-b566-70b2a11eb307" (UID: "df33346c-a298-4a78-b566-70b2a11eb307"). InnerVolumeSpecName "kube-api-access-hx5l6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.268059 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df33346c-a298-4a78-b566-70b2a11eb307-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "df33346c-a298-4a78-b566-70b2a11eb307" (UID: "df33346c-a298-4a78-b566-70b2a11eb307"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.325918 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.344641 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df33346c-a298-4a78-b566-70b2a11eb307-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.344677 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cec571af-8e80-4ddb-8218-deab898f34cd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.344692 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df33346c-a298-4a78-b566-70b2a11eb307-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.344703 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hx5l6\" (UniqueName: \"kubernetes.io/projected/df33346c-a298-4a78-b566-70b2a11eb307-kube-api-access-hx5l6\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.352880 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-drds8" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.383858 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.426254 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6h2tt"] Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.427434 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6h2tt"] Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.445489 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-utilities\") pod \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\" (UID: \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.445565 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-catalog-content\") pod \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\" (UID: \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.446654 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-utilities" (OuterVolumeSpecName: "utilities") pod "8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" (UID: "8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.454376 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4b4mx\" (UniqueName: \"kubernetes.io/projected/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-kube-api-access-4b4mx\") pod \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\" (UID: \"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.454421 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3776cbbe-bbc8-430b-8db5-881918c75fb2-marketplace-trusted-ca\") pod \"3776cbbe-bbc8-430b-8db5-881918c75fb2\" (UID: \"3776cbbe-bbc8-430b-8db5-881918c75fb2\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.454464 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c944be15-8b3b-417f-9640-2c926704f541-catalog-content\") pod \"c944be15-8b3b-417f-9640-2c926704f541\" (UID: \"c944be15-8b3b-417f-9640-2c926704f541\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.454488 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c944be15-8b3b-417f-9640-2c926704f541-utilities\") pod \"c944be15-8b3b-417f-9640-2c926704f541\" (UID: \"c944be15-8b3b-417f-9640-2c926704f541\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.454796 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.454965 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3776cbbe-bbc8-430b-8db5-881918c75fb2-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod 
"3776cbbe-bbc8-430b-8db5-881918c75fb2" (UID: "3776cbbe-bbc8-430b-8db5-881918c75fb2"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.455607 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c944be15-8b3b-417f-9640-2c926704f541-utilities" (OuterVolumeSpecName: "utilities") pod "c944be15-8b3b-417f-9640-2c926704f541" (UID: "c944be15-8b3b-417f-9640-2c926704f541"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.463242 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-kube-api-access-4b4mx" (OuterVolumeSpecName: "kube-api-access-4b4mx") pod "8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" (UID: "8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd"). InnerVolumeSpecName "kube-api-access-4b4mx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.496892 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" (UID: "8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.508348 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c944be15-8b3b-417f-9640-2c926704f541-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c944be15-8b3b-417f-9640-2c926704f541" (UID: "c944be15-8b3b-417f-9640-2c926704f541"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.555759 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3776cbbe-bbc8-430b-8db5-881918c75fb2-marketplace-operator-metrics\") pod \"3776cbbe-bbc8-430b-8db5-881918c75fb2\" (UID: \"3776cbbe-bbc8-430b-8db5-881918c75fb2\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.555852 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzcjb\" (UniqueName: \"kubernetes.io/projected/3776cbbe-bbc8-430b-8db5-881918c75fb2-kube-api-access-rzcjb\") pod \"3776cbbe-bbc8-430b-8db5-881918c75fb2\" (UID: \"3776cbbe-bbc8-430b-8db5-881918c75fb2\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.555888 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rszq4\" (UniqueName: \"kubernetes.io/projected/c944be15-8b3b-417f-9640-2c926704f541-kube-api-access-rszq4\") pod \"c944be15-8b3b-417f-9640-2c926704f541\" (UID: \"c944be15-8b3b-417f-9640-2c926704f541\") " Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.556134 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c944be15-8b3b-417f-9640-2c926704f541-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.556158 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c944be15-8b3b-417f-9640-2c926704f541-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.556169 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.556183 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4b4mx\" (UniqueName: \"kubernetes.io/projected/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd-kube-api-access-4b4mx\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.556213 4932 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3776cbbe-bbc8-430b-8db5-881918c75fb2-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.566432 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c944be15-8b3b-417f-9640-2c926704f541-kube-api-access-rszq4" (OuterVolumeSpecName: "kube-api-access-rszq4") pod "c944be15-8b3b-417f-9640-2c926704f541" (UID: "c944be15-8b3b-417f-9640-2c926704f541"). InnerVolumeSpecName "kube-api-access-rszq4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.566471 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3776cbbe-bbc8-430b-8db5-881918c75fb2-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "3776cbbe-bbc8-430b-8db5-881918c75fb2" (UID: "3776cbbe-bbc8-430b-8db5-881918c75fb2"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.566664 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3776cbbe-bbc8-430b-8db5-881918c75fb2-kube-api-access-rzcjb" (OuterVolumeSpecName: "kube-api-access-rzcjb") pod "3776cbbe-bbc8-430b-8db5-881918c75fb2" (UID: "3776cbbe-bbc8-430b-8db5-881918c75fb2"). InnerVolumeSpecName "kube-api-access-rzcjb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.612129 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cec571af-8e80-4ddb-8218-deab898f34cd" path="/var/lib/kubelet/pods/cec571af-8e80-4ddb-8218-deab898f34cd/volumes" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.657316 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzcjb\" (UniqueName: \"kubernetes.io/projected/3776cbbe-bbc8-430b-8db5-881918c75fb2-kube-api-access-rzcjb\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.657355 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rszq4\" (UniqueName: \"kubernetes.io/projected/c944be15-8b3b-417f-9640-2c926704f541-kube-api-access-rszq4\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:12 crc kubenswrapper[4932]: I1125 08:53:12.657365 4932 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/3776cbbe-bbc8-430b-8db5-881918c75fb2-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.098951 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drds8" event={"ID":"c944be15-8b3b-417f-9640-2c926704f541","Type":"ContainerDied","Data":"c5fb013ce43b99e90d0d7247406197519d9d4393d3f400e54b840a0a52f13583"} Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.099220 4932 scope.go:117] "RemoveContainer" containerID="5d4c63c0a497f5c34d7d411e410cb59c4a0af17936b2c4ed2b98ce109272d285" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.098972 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-drds8" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.100330 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.100344 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-ktv2t" event={"ID":"3776cbbe-bbc8-430b-8db5-881918c75fb2","Type":"ContainerDied","Data":"79c393c6ddb80ead372950870075f64d3a131a3f40d599c2b586042870812703"} Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.106688 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tjm2s" event={"ID":"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd","Type":"ContainerDied","Data":"5593a0fad723c29fa273ff97924199acb949b473ee59467a55d4682924e7941b"} Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.106936 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tjm2s" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.108787 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" event={"ID":"e5f276d9-ee2f-45e0-9048-034b3212a733","Type":"ContainerStarted","Data":"9df4c4452f2a13cbc5bef6d31d794f6725db8e0370dc72ef082483c1b71ae0b6"} Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.108835 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" event={"ID":"e5f276d9-ee2f-45e0-9048-034b3212a733","Type":"ContainerStarted","Data":"fe6264d2d3d19b1d399a2e21b075b5c39fb90f06ab5d20de1fd6965b33a5be1f"} Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.109740 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.114413 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.114694 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xtp9" event={"ID":"df33346c-a298-4a78-b566-70b2a11eb307","Type":"ContainerDied","Data":"8841cf54801260f6337acfcaa438460336160a19066a98c56bd10ceec27e21b0"} Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.114797 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5xtp9" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.120367 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ktv2t"] Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.120792 4932 scope.go:117] "RemoveContainer" containerID="622ab97cae7dffb92fbdfa54c75588b591b1eaf16fb6eb11d106a36f02025702" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.122658 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-ktv2t"] Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.134880 4932 scope.go:117] "RemoveContainer" containerID="889d2be148af28811a1e15f3301374ddd550b46329cf414c02fa81a707decb2b" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.135858 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-drds8"] Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.137996 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-drds8"] Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.151078 4932 scope.go:117] "RemoveContainer" containerID="3425b5f3e5ca8a75d9f08346140291dd8574ad31ccd41e487ceb940a9486a14e" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.159539 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-92hkm" podStartSLOduration=2.159492771 podStartE2EDuration="2.159492771s" podCreationTimestamp="2025-11-25 08:53:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:53:13.149503261 +0000 UTC m=+253.275532824" watchObservedRunningTime="2025-11-25 08:53:13.159492771 +0000 UTC m=+253.285522364" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 
08:53:13.175272 4932 scope.go:117] "RemoveContainer" containerID="5a08c6bda3e2c889ee14eb3eaca412d1fa2fbca37f544b3fccf6d99fe3fa3e91" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.179997 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tjm2s"] Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.182857 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tjm2s"] Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.186512 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xtp9"] Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.190120 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xtp9"] Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.193543 4932 scope.go:117] "RemoveContainer" containerID="d0e49e1d619071ff97438d76cc42144429ed8b0c6aa1080b0cf015c5008a595a" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.206266 4932 scope.go:117] "RemoveContainer" containerID="14ba72eff3f817c7968890efaa19b3910341a7dedab2ef3902345fbcfbd0919f" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640298 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mwss5"] Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640515 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" containerName="extract-content" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640531 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" containerName="extract-content" Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640542 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" containerName="extract-utilities" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640548 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" containerName="extract-utilities" Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640562 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df33346c-a298-4a78-b566-70b2a11eb307" containerName="registry-server" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640572 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="df33346c-a298-4a78-b566-70b2a11eb307" containerName="registry-server" Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640580 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c944be15-8b3b-417f-9640-2c926704f541" containerName="extract-content" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640587 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c944be15-8b3b-417f-9640-2c926704f541" containerName="extract-content" Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640600 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3776cbbe-bbc8-430b-8db5-881918c75fb2" containerName="marketplace-operator" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640608 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3776cbbe-bbc8-430b-8db5-881918c75fb2" containerName="marketplace-operator" Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640616 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cec571af-8e80-4ddb-8218-deab898f34cd" containerName="extract-content" 
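[annotation] The run of cpu_manager.go:410 "RemoveStaleState: removing container" and state_mem.go:107 "Deleted CPUSet assignment" entries around this point (the run continues below) shows the kubelet reconciling its resource-manager checkpoints as the new redhat-marketplace-mwss5 pod is admitted: CPU-set and memory-manager state belonging to containers of the just-deleted catalog pods is purged first, so resources they held can be reassigned. A minimal sketch of that purge pattern follows; the types and names here are hypothetical stand-ins, not the actual kubelet code (which lives under pkg/kubelet/cm/cpumanager in the Kubernetes tree):

package main

import "fmt"

// key identifies one checkpointed assignment: (podUID, containerName).
type key struct {
	podUID        string
	containerName string
}

// stateMem is a toy stand-in for the CPU manager's in-memory state.
type stateMem struct {
	assignments map[key]string // value: assigned CPU set, e.g. "2-3"
}

// removeStaleState drops every assignment whose pod is no longer active,
// mirroring the paired "RemoveStaleState: removing container" /
// "Deleted CPUSet assignment" entries in the surrounding log.
func (s *stateMem) removeStaleState(activePods map[string]bool) {
	for k := range s.assignments { // deleting while ranging is safe in Go
		if !activePods[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", k.podUID, k.containerName)
			delete(s.assignments, k)
			fmt.Printf("Deleted CPUSet assignment podUID=%q containerName=%q\n", k.podUID, k.containerName)
		}
	}
}

func main() {
	s := &stateMem{assignments: map[key]string{
		{"8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd", "registry-server"}: "2-3",
		{"41cec203-4664-4392-affb-95616385152a", "registry-server"}: "4-5",
	}}
	// Only the newly admitted pod is still active; the deleted catalog
	// pod's stale assignment is purged, as in the entries above and below.
	s.removeStaleState(map[string]bool{"41cec203-4664-4392-affb-95616385152a": true})
}

In the real kubelet this sweep runs before the incoming pod's containers are allocated, which is why the stale-state entries appear immediately after the "SyncLoop ADD" for redhat-marketplace-mwss5 and before its volume mounts below. [end annotation]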
Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640622 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cec571af-8e80-4ddb-8218-deab898f34cd" containerName="extract-content" Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640632 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cec571af-8e80-4ddb-8218-deab898f34cd" containerName="extract-utilities" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640639 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cec571af-8e80-4ddb-8218-deab898f34cd" containerName="extract-utilities" Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640650 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df33346c-a298-4a78-b566-70b2a11eb307" containerName="extract-content" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640657 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="df33346c-a298-4a78-b566-70b2a11eb307" containerName="extract-content" Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640668 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" containerName="registry-server" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640675 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" containerName="registry-server" Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640686 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c944be15-8b3b-417f-9640-2c926704f541" containerName="extract-utilities" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640697 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c944be15-8b3b-417f-9640-2c926704f541" containerName="extract-utilities" Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640709 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c944be15-8b3b-417f-9640-2c926704f541" containerName="registry-server" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640717 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c944be15-8b3b-417f-9640-2c926704f541" containerName="registry-server" Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640728 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df33346c-a298-4a78-b566-70b2a11eb307" containerName="extract-utilities" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640735 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="df33346c-a298-4a78-b566-70b2a11eb307" containerName="extract-utilities" Nov 25 08:53:13 crc kubenswrapper[4932]: E1125 08:53:13.640745 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cec571af-8e80-4ddb-8218-deab898f34cd" containerName="registry-server" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640754 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cec571af-8e80-4ddb-8218-deab898f34cd" containerName="registry-server" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640874 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="df33346c-a298-4a78-b566-70b2a11eb307" containerName="registry-server" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640891 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="cec571af-8e80-4ddb-8218-deab898f34cd" containerName="registry-server" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640906 4932 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="3776cbbe-bbc8-430b-8db5-881918c75fb2" containerName="marketplace-operator" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640916 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" containerName="registry-server" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.640927 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c944be15-8b3b-417f-9640-2c926704f541" containerName="registry-server" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.643257 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.645047 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.656401 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mwss5"] Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.668023 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41cec203-4664-4392-affb-95616385152a-catalog-content\") pod \"redhat-marketplace-mwss5\" (UID: \"41cec203-4664-4392-affb-95616385152a\") " pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.668114 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41cec203-4664-4392-affb-95616385152a-utilities\") pod \"redhat-marketplace-mwss5\" (UID: \"41cec203-4664-4392-affb-95616385152a\") " pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.668143 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4p5n\" (UniqueName: \"kubernetes.io/projected/41cec203-4664-4392-affb-95616385152a-kube-api-access-f4p5n\") pod \"redhat-marketplace-mwss5\" (UID: \"41cec203-4664-4392-affb-95616385152a\") " pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.768841 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41cec203-4664-4392-affb-95616385152a-utilities\") pod \"redhat-marketplace-mwss5\" (UID: \"41cec203-4664-4392-affb-95616385152a\") " pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.768886 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4p5n\" (UniqueName: \"kubernetes.io/projected/41cec203-4664-4392-affb-95616385152a-kube-api-access-f4p5n\") pod \"redhat-marketplace-mwss5\" (UID: \"41cec203-4664-4392-affb-95616385152a\") " pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.768944 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41cec203-4664-4392-affb-95616385152a-catalog-content\") pod \"redhat-marketplace-mwss5\" (UID: \"41cec203-4664-4392-affb-95616385152a\") " pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.769321 4932 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41cec203-4664-4392-affb-95616385152a-utilities\") pod \"redhat-marketplace-mwss5\" (UID: \"41cec203-4664-4392-affb-95616385152a\") " pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.771716 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41cec203-4664-4392-affb-95616385152a-catalog-content\") pod \"redhat-marketplace-mwss5\" (UID: \"41cec203-4664-4392-affb-95616385152a\") " pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.788235 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4p5n\" (UniqueName: \"kubernetes.io/projected/41cec203-4664-4392-affb-95616385152a-kube-api-access-f4p5n\") pod \"redhat-marketplace-mwss5\" (UID: \"41cec203-4664-4392-affb-95616385152a\") " pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.843349 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jp757"] Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.848168 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.851179 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.855691 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jp757"] Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.869845 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-catalog-content\") pod \"redhat-operators-jp757\" (UID: \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\") " pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.869961 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84rql\" (UniqueName: \"kubernetes.io/projected/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-kube-api-access-84rql\") pod \"redhat-operators-jp757\" (UID: \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\") " pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.869992 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-utilities\") pod \"redhat-operators-jp757\" (UID: \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\") " pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.962255 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.971387 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-catalog-content\") pod \"redhat-operators-jp757\" (UID: \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\") " pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.971544 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84rql\" (UniqueName: \"kubernetes.io/projected/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-kube-api-access-84rql\") pod \"redhat-operators-jp757\" (UID: \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\") " pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.971592 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-utilities\") pod \"redhat-operators-jp757\" (UID: \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\") " pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.971845 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-catalog-content\") pod \"redhat-operators-jp757\" (UID: \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\") " pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.972026 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-utilities\") pod \"redhat-operators-jp757\" (UID: \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\") " pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:13 crc kubenswrapper[4932]: I1125 08:53:13.991715 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84rql\" (UniqueName: \"kubernetes.io/projected/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-kube-api-access-84rql\") pod \"redhat-operators-jp757\" (UID: \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\") " pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:14 crc kubenswrapper[4932]: I1125 08:53:14.160869 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:14 crc kubenswrapper[4932]: I1125 08:53:14.345115 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mwss5"] Nov 25 08:53:14 crc kubenswrapper[4932]: W1125 08:53:14.351904 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod41cec203_4664_4392_affb_95616385152a.slice/crio-fde1906739fef6da26820b06d123458f20bf0471e955c671b29b0f06dd0a8ca7 WatchSource:0}: Error finding container fde1906739fef6da26820b06d123458f20bf0471e955c671b29b0f06dd0a8ca7: Status 404 returned error can't find the container with id fde1906739fef6da26820b06d123458f20bf0471e955c671b29b0f06dd0a8ca7 Nov 25 08:53:14 crc kubenswrapper[4932]: I1125 08:53:14.530028 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jp757"] Nov 25 08:53:14 crc kubenswrapper[4932]: W1125 08:53:14.568768 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b9b79ce_bfdb_466e_96b6_5ed330ba1f2d.slice/crio-15103fb9bf3bcdc818d56c5486341bb712d6fe537eb6855091a6c664e24d91f5 WatchSource:0}: Error finding container 15103fb9bf3bcdc818d56c5486341bb712d6fe537eb6855091a6c664e24d91f5: Status 404 returned error can't find the container with id 15103fb9bf3bcdc818d56c5486341bb712d6fe537eb6855091a6c664e24d91f5 Nov 25 08:53:14 crc kubenswrapper[4932]: I1125 08:53:14.612044 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3776cbbe-bbc8-430b-8db5-881918c75fb2" path="/var/lib/kubelet/pods/3776cbbe-bbc8-430b-8db5-881918c75fb2/volumes" Nov 25 08:53:14 crc kubenswrapper[4932]: I1125 08:53:14.612518 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd" path="/var/lib/kubelet/pods/8f7572c0-6f7b-4a32-9ff8-0aa3e98293cd/volumes" Nov 25 08:53:14 crc kubenswrapper[4932]: I1125 08:53:14.613110 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c944be15-8b3b-417f-9640-2c926704f541" path="/var/lib/kubelet/pods/c944be15-8b3b-417f-9640-2c926704f541/volumes" Nov 25 08:53:14 crc kubenswrapper[4932]: I1125 08:53:14.614334 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df33346c-a298-4a78-b566-70b2a11eb307" path="/var/lib/kubelet/pods/df33346c-a298-4a78-b566-70b2a11eb307/volumes" Nov 25 08:53:15 crc kubenswrapper[4932]: I1125 08:53:15.127915 4932 generic.go:334] "Generic (PLEG): container finished" podID="41cec203-4664-4392-affb-95616385152a" containerID="dc73f7adf3c2700327e5e7824f28f389a2eb434b927752183fd540534777619d" exitCode=0 Nov 25 08:53:15 crc kubenswrapper[4932]: I1125 08:53:15.127957 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mwss5" event={"ID":"41cec203-4664-4392-affb-95616385152a","Type":"ContainerDied","Data":"dc73f7adf3c2700327e5e7824f28f389a2eb434b927752183fd540534777619d"} Nov 25 08:53:15 crc kubenswrapper[4932]: I1125 08:53:15.128315 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mwss5" event={"ID":"41cec203-4664-4392-affb-95616385152a","Type":"ContainerStarted","Data":"fde1906739fef6da26820b06d123458f20bf0471e955c671b29b0f06dd0a8ca7"} Nov 25 08:53:15 crc kubenswrapper[4932]: I1125 08:53:15.129019 4932 generic.go:334] "Generic (PLEG): container finished" 
podID="6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" containerID="dc23ab802f32d3709efb2f8b85db6ccf48eb172fc671b8ad6b005c5bd4b5dcee" exitCode=0 Nov 25 08:53:15 crc kubenswrapper[4932]: I1125 08:53:15.130217 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jp757" event={"ID":"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d","Type":"ContainerDied","Data":"dc23ab802f32d3709efb2f8b85db6ccf48eb172fc671b8ad6b005c5bd4b5dcee"} Nov 25 08:53:15 crc kubenswrapper[4932]: I1125 08:53:15.130234 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jp757" event={"ID":"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d","Type":"ContainerStarted","Data":"15103fb9bf3bcdc818d56c5486341bb712d6fe537eb6855091a6c664e24d91f5"} Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.045345 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-95f8n"] Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.047447 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.050151 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.058343 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-95f8n"] Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.137146 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jp757" event={"ID":"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d","Type":"ContainerStarted","Data":"e52977060390a28fa6e57c6184cd4089a9227b149bdcf5d7d5ad30d60774d6f6"} Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.140625 4932 generic.go:334] "Generic (PLEG): container finished" podID="41cec203-4664-4392-affb-95616385152a" containerID="323dc4d46be63c31a5e9936def062d175d3f3d91958be4b47e000db3ba73b7c9" exitCode=0 Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.140668 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mwss5" event={"ID":"41cec203-4664-4392-affb-95616385152a","Type":"ContainerDied","Data":"323dc4d46be63c31a5e9936def062d175d3f3d91958be4b47e000db3ba73b7c9"} Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.195924 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b80e081-793b-44f7-9e32-75f054f51370-catalog-content\") pod \"community-operators-95f8n\" (UID: \"8b80e081-793b-44f7-9e32-75f054f51370\") " pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.196253 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssjlr\" (UniqueName: \"kubernetes.io/projected/8b80e081-793b-44f7-9e32-75f054f51370-kube-api-access-ssjlr\") pod \"community-operators-95f8n\" (UID: \"8b80e081-793b-44f7-9e32-75f054f51370\") " pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.196395 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b80e081-793b-44f7-9e32-75f054f51370-utilities\") pod \"community-operators-95f8n\" (UID: 
\"8b80e081-793b-44f7-9e32-75f054f51370\") " pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.243328 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bfvv5"] Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.244556 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.246452 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.252872 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bfvv5"] Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.297651 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b80e081-793b-44f7-9e32-75f054f51370-catalog-content\") pod \"community-operators-95f8n\" (UID: \"8b80e081-793b-44f7-9e32-75f054f51370\") " pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.297739 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssjlr\" (UniqueName: \"kubernetes.io/projected/8b80e081-793b-44f7-9e32-75f054f51370-kube-api-access-ssjlr\") pod \"community-operators-95f8n\" (UID: \"8b80e081-793b-44f7-9e32-75f054f51370\") " pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.297774 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b80e081-793b-44f7-9e32-75f054f51370-utilities\") pod \"community-operators-95f8n\" (UID: \"8b80e081-793b-44f7-9e32-75f054f51370\") " pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.298252 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b80e081-793b-44f7-9e32-75f054f51370-utilities\") pod \"community-operators-95f8n\" (UID: \"8b80e081-793b-44f7-9e32-75f054f51370\") " pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.298587 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b80e081-793b-44f7-9e32-75f054f51370-catalog-content\") pod \"community-operators-95f8n\" (UID: \"8b80e081-793b-44f7-9e32-75f054f51370\") " pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.334444 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssjlr\" (UniqueName: \"kubernetes.io/projected/8b80e081-793b-44f7-9e32-75f054f51370-kube-api-access-ssjlr\") pod \"community-operators-95f8n\" (UID: \"8b80e081-793b-44f7-9e32-75f054f51370\") " pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.370547 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.398581 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zzg8\" (UniqueName: \"kubernetes.io/projected/7af73cde-771e-4ea0-b5c1-549cdee5181f-kube-api-access-5zzg8\") pod \"certified-operators-bfvv5\" (UID: \"7af73cde-771e-4ea0-b5c1-549cdee5181f\") " pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.398632 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7af73cde-771e-4ea0-b5c1-549cdee5181f-catalog-content\") pod \"certified-operators-bfvv5\" (UID: \"7af73cde-771e-4ea0-b5c1-549cdee5181f\") " pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.398694 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7af73cde-771e-4ea0-b5c1-549cdee5181f-utilities\") pod \"certified-operators-bfvv5\" (UID: \"7af73cde-771e-4ea0-b5c1-549cdee5181f\") " pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.500653 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zzg8\" (UniqueName: \"kubernetes.io/projected/7af73cde-771e-4ea0-b5c1-549cdee5181f-kube-api-access-5zzg8\") pod \"certified-operators-bfvv5\" (UID: \"7af73cde-771e-4ea0-b5c1-549cdee5181f\") " pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.503019 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7af73cde-771e-4ea0-b5c1-549cdee5181f-catalog-content\") pod \"certified-operators-bfvv5\" (UID: \"7af73cde-771e-4ea0-b5c1-549cdee5181f\") " pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.503217 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7af73cde-771e-4ea0-b5c1-549cdee5181f-catalog-content\") pod \"certified-operators-bfvv5\" (UID: \"7af73cde-771e-4ea0-b5c1-549cdee5181f\") " pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.503416 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7af73cde-771e-4ea0-b5c1-549cdee5181f-utilities\") pod \"certified-operators-bfvv5\" (UID: \"7af73cde-771e-4ea0-b5c1-549cdee5181f\") " pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.503780 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7af73cde-771e-4ea0-b5c1-549cdee5181f-utilities\") pod \"certified-operators-bfvv5\" (UID: \"7af73cde-771e-4ea0-b5c1-549cdee5181f\") " pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.527126 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zzg8\" (UniqueName: \"kubernetes.io/projected/7af73cde-771e-4ea0-b5c1-549cdee5181f-kube-api-access-5zzg8\") pod 
\"certified-operators-bfvv5\" (UID: \"7af73cde-771e-4ea0-b5c1-549cdee5181f\") " pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.627867 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:16 crc kubenswrapper[4932]: I1125 08:53:16.752764 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-95f8n"] Nov 25 08:53:16 crc kubenswrapper[4932]: W1125 08:53:16.762346 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b80e081_793b_44f7_9e32_75f054f51370.slice/crio-479005491a69175936fc8d0886a7ebd4ce2e82242bf448e24aabd43be4f1f43a WatchSource:0}: Error finding container 479005491a69175936fc8d0886a7ebd4ce2e82242bf448e24aabd43be4f1f43a: Status 404 returned error can't find the container with id 479005491a69175936fc8d0886a7ebd4ce2e82242bf448e24aabd43be4f1f43a Nov 25 08:53:17 crc kubenswrapper[4932]: I1125 08:53:17.048051 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bfvv5"] Nov 25 08:53:17 crc kubenswrapper[4932]: W1125 08:53:17.055983 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7af73cde_771e_4ea0_b5c1_549cdee5181f.slice/crio-da06a75c1280a801b30717a498efc51e89fab0ccd41824475f4331c540d78a1f WatchSource:0}: Error finding container da06a75c1280a801b30717a498efc51e89fab0ccd41824475f4331c540d78a1f: Status 404 returned error can't find the container with id da06a75c1280a801b30717a498efc51e89fab0ccd41824475f4331c540d78a1f Nov 25 08:53:17 crc kubenswrapper[4932]: I1125 08:53:17.150663 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mwss5" event={"ID":"41cec203-4664-4392-affb-95616385152a","Type":"ContainerStarted","Data":"dd8c0cf67fd6cb5ed300dcb54e53c4c1c6c02dee7164e566aaadc6515beef4b7"} Nov 25 08:53:17 crc kubenswrapper[4932]: I1125 08:53:17.154002 4932 generic.go:334] "Generic (PLEG): container finished" podID="6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" containerID="e52977060390a28fa6e57c6184cd4089a9227b149bdcf5d7d5ad30d60774d6f6" exitCode=0 Nov 25 08:53:17 crc kubenswrapper[4932]: I1125 08:53:17.154052 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jp757" event={"ID":"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d","Type":"ContainerDied","Data":"e52977060390a28fa6e57c6184cd4089a9227b149bdcf5d7d5ad30d60774d6f6"} Nov 25 08:53:17 crc kubenswrapper[4932]: I1125 08:53:17.158278 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfvv5" event={"ID":"7af73cde-771e-4ea0-b5c1-549cdee5181f","Type":"ContainerStarted","Data":"da06a75c1280a801b30717a498efc51e89fab0ccd41824475f4331c540d78a1f"} Nov 25 08:53:17 crc kubenswrapper[4932]: I1125 08:53:17.161560 4932 generic.go:334] "Generic (PLEG): container finished" podID="8b80e081-793b-44f7-9e32-75f054f51370" containerID="269505e594581828f2e4bb96b7257e136282881e1868acc03f2b8eff5b865ab4" exitCode=0 Nov 25 08:53:17 crc kubenswrapper[4932]: I1125 08:53:17.161609 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-95f8n" event={"ID":"8b80e081-793b-44f7-9e32-75f054f51370","Type":"ContainerDied","Data":"269505e594581828f2e4bb96b7257e136282881e1868acc03f2b8eff5b865ab4"} Nov 
25 08:53:17 crc kubenswrapper[4932]: I1125 08:53:17.161650 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-95f8n" event={"ID":"8b80e081-793b-44f7-9e32-75f054f51370","Type":"ContainerStarted","Data":"479005491a69175936fc8d0886a7ebd4ce2e82242bf448e24aabd43be4f1f43a"} Nov 25 08:53:17 crc kubenswrapper[4932]: I1125 08:53:17.174867 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mwss5" podStartSLOduration=2.376333646 podStartE2EDuration="4.174843091s" podCreationTimestamp="2025-11-25 08:53:13 +0000 UTC" firstStartedPulling="2025-11-25 08:53:15.130289102 +0000 UTC m=+255.256318665" lastFinishedPulling="2025-11-25 08:53:16.928798547 +0000 UTC m=+257.054828110" observedRunningTime="2025-11-25 08:53:17.170592788 +0000 UTC m=+257.296622351" watchObservedRunningTime="2025-11-25 08:53:17.174843091 +0000 UTC m=+257.300872654" Nov 25 08:53:18 crc kubenswrapper[4932]: I1125 08:53:18.169307 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jp757" event={"ID":"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d","Type":"ContainerStarted","Data":"159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9"} Nov 25 08:53:18 crc kubenswrapper[4932]: I1125 08:53:18.172408 4932 generic.go:334] "Generic (PLEG): container finished" podID="7af73cde-771e-4ea0-b5c1-549cdee5181f" containerID="70fd2fadf1d54985bb09a0fd5d68be422d3360a05690aeb0c66d6c926a433387" exitCode=0 Nov 25 08:53:18 crc kubenswrapper[4932]: I1125 08:53:18.172497 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfvv5" event={"ID":"7af73cde-771e-4ea0-b5c1-549cdee5181f","Type":"ContainerDied","Data":"70fd2fadf1d54985bb09a0fd5d68be422d3360a05690aeb0c66d6c926a433387"} Nov 25 08:53:18 crc kubenswrapper[4932]: I1125 08:53:18.175014 4932 generic.go:334] "Generic (PLEG): container finished" podID="8b80e081-793b-44f7-9e32-75f054f51370" containerID="115bef1e14c0f7485e6e3ed692d30ab880b84d6c4d79e3d4b054d70c446e0ec2" exitCode=0 Nov 25 08:53:18 crc kubenswrapper[4932]: I1125 08:53:18.176240 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-95f8n" event={"ID":"8b80e081-793b-44f7-9e32-75f054f51370","Type":"ContainerDied","Data":"115bef1e14c0f7485e6e3ed692d30ab880b84d6c4d79e3d4b054d70c446e0ec2"} Nov 25 08:53:18 crc kubenswrapper[4932]: I1125 08:53:18.186001 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jp757" podStartSLOduration=2.546980524 podStartE2EDuration="5.185981798s" podCreationTimestamp="2025-11-25 08:53:13 +0000 UTC" firstStartedPulling="2025-11-25 08:53:15.130762416 +0000 UTC m=+255.256791979" lastFinishedPulling="2025-11-25 08:53:17.76976369 +0000 UTC m=+257.895793253" observedRunningTime="2025-11-25 08:53:18.184430893 +0000 UTC m=+258.310460456" watchObservedRunningTime="2025-11-25 08:53:18.185981798 +0000 UTC m=+258.312011371" Nov 25 08:53:19 crc kubenswrapper[4932]: I1125 08:53:19.182351 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfvv5" event={"ID":"7af73cde-771e-4ea0-b5c1-549cdee5181f","Type":"ContainerStarted","Data":"41c5dc15e64630cc33771931faf52585cf9024d30b0816432800545763b2e029"} Nov 25 08:53:19 crc kubenswrapper[4932]: I1125 08:53:19.185318 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-95f8n" 
event={"ID":"8b80e081-793b-44f7-9e32-75f054f51370","Type":"ContainerStarted","Data":"453d242c9592c4f275ae49c7ca1bd12d707653568a0f260921006a587a0b748f"} Nov 25 08:53:19 crc kubenswrapper[4932]: I1125 08:53:19.235697 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-95f8n" podStartSLOduration=1.603136459 podStartE2EDuration="3.235678703s" podCreationTimestamp="2025-11-25 08:53:16 +0000 UTC" firstStartedPulling="2025-11-25 08:53:17.163008198 +0000 UTC m=+257.289037761" lastFinishedPulling="2025-11-25 08:53:18.795550442 +0000 UTC m=+258.921580005" observedRunningTime="2025-11-25 08:53:19.231701598 +0000 UTC m=+259.357731401" watchObservedRunningTime="2025-11-25 08:53:19.235678703 +0000 UTC m=+259.361708266" Nov 25 08:53:20 crc kubenswrapper[4932]: I1125 08:53:20.190225 4932 generic.go:334] "Generic (PLEG): container finished" podID="7af73cde-771e-4ea0-b5c1-549cdee5181f" containerID="41c5dc15e64630cc33771931faf52585cf9024d30b0816432800545763b2e029" exitCode=0 Nov 25 08:53:20 crc kubenswrapper[4932]: I1125 08:53:20.190326 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfvv5" event={"ID":"7af73cde-771e-4ea0-b5c1-549cdee5181f","Type":"ContainerDied","Data":"41c5dc15e64630cc33771931faf52585cf9024d30b0816432800545763b2e029"} Nov 25 08:53:22 crc kubenswrapper[4932]: I1125 08:53:22.200839 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfvv5" event={"ID":"7af73cde-771e-4ea0-b5c1-549cdee5181f","Type":"ContainerStarted","Data":"e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7"} Nov 25 08:53:22 crc kubenswrapper[4932]: I1125 08:53:22.219929 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bfvv5" podStartSLOduration=3.613255591 podStartE2EDuration="6.219908427s" podCreationTimestamp="2025-11-25 08:53:16 +0000 UTC" firstStartedPulling="2025-11-25 08:53:18.173654321 +0000 UTC m=+258.299683884" lastFinishedPulling="2025-11-25 08:53:20.780307147 +0000 UTC m=+260.906336720" observedRunningTime="2025-11-25 08:53:22.218515346 +0000 UTC m=+262.344544899" watchObservedRunningTime="2025-11-25 08:53:22.219908427 +0000 UTC m=+262.345937990" Nov 25 08:53:23 crc kubenswrapper[4932]: I1125 08:53:23.963246 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:23 crc kubenswrapper[4932]: I1125 08:53:23.963660 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:24 crc kubenswrapper[4932]: I1125 08:53:24.007955 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:24 crc kubenswrapper[4932]: I1125 08:53:24.161982 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:24 crc kubenswrapper[4932]: I1125 08:53:24.162048 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:24 crc kubenswrapper[4932]: I1125 08:53:24.197364 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:24 crc kubenswrapper[4932]: I1125 08:53:24.252084 4932 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mwss5" Nov 25 08:53:24 crc kubenswrapper[4932]: I1125 08:53:24.266599 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jp757" Nov 25 08:53:26 crc kubenswrapper[4932]: I1125 08:53:26.371455 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:26 crc kubenswrapper[4932]: I1125 08:53:26.371846 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:26 crc kubenswrapper[4932]: I1125 08:53:26.413316 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:26 crc kubenswrapper[4932]: I1125 08:53:26.628218 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:26 crc kubenswrapper[4932]: I1125 08:53:26.628269 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:26 crc kubenswrapper[4932]: I1125 08:53:26.677750 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:53:27 crc kubenswrapper[4932]: I1125 08:53:27.269252 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-95f8n" Nov 25 08:53:27 crc kubenswrapper[4932]: I1125 08:53:27.271198 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 08:55:07 crc kubenswrapper[4932]: I1125 08:55:07.181237 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 08:55:07 crc kubenswrapper[4932]: I1125 08:55:07.181898 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 08:55:37 crc kubenswrapper[4932]: I1125 08:55:37.181746 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 08:55:37 crc kubenswrapper[4932]: I1125 08:55:37.182434 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 08:56:07 crc kubenswrapper[4932]: I1125 08:56:07.180950 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 08:56:07 crc kubenswrapper[4932]: I1125 08:56:07.181766 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 08:56:07 crc kubenswrapper[4932]: I1125 08:56:07.181843 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:56:07 crc kubenswrapper[4932]: I1125 08:56:07.182885 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1be3115c0f5e9ad2a9965b1d4ca20e00cbc062a1f0c3346ce11cc851d5ae811c"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 08:56:07 crc kubenswrapper[4932]: I1125 08:56:07.182998 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://1be3115c0f5e9ad2a9965b1d4ca20e00cbc062a1f0c3346ce11cc851d5ae811c" gracePeriod=600 Nov 25 08:56:08 crc kubenswrapper[4932]: I1125 08:56:08.258856 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="1be3115c0f5e9ad2a9965b1d4ca20e00cbc062a1f0c3346ce11cc851d5ae811c" exitCode=0 Nov 25 08:56:08 crc kubenswrapper[4932]: I1125 08:56:08.258973 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"1be3115c0f5e9ad2a9965b1d4ca20e00cbc062a1f0c3346ce11cc851d5ae811c"} Nov 25 08:56:08 crc kubenswrapper[4932]: I1125 08:56:08.259160 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"1014150dedd5450252a58cc05ec9112ff1e142db0b602232dc6a81197418f719"} Nov 25 08:56:08 crc kubenswrapper[4932]: I1125 08:56:08.259232 4932 scope.go:117] "RemoveContainer" containerID="40d4ee00089c802ab84524c697b6e3163dd79d145034c7d6816654c2c231f177" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.690451 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-js6tz"] Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.692112 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.703639 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-js6tz"] Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.841805 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.841883 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/633bcf87-3eb6-47af-a8fa-b50788aba639-registry-certificates\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.841938 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/633bcf87-3eb6-47af-a8fa-b50788aba639-ca-trust-extracted\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.841964 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/633bcf87-3eb6-47af-a8fa-b50788aba639-registry-tls\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.841985 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/633bcf87-3eb6-47af-a8fa-b50788aba639-bound-sa-token\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.842008 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/633bcf87-3eb6-47af-a8fa-b50788aba639-installation-pull-secrets\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.842030 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/633bcf87-3eb6-47af-a8fa-b50788aba639-trusted-ca\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.842087 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qczc4\" (UniqueName: 
\"kubernetes.io/projected/633bcf87-3eb6-47af-a8fa-b50788aba639-kube-api-access-qczc4\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.863477 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.943718 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/633bcf87-3eb6-47af-a8fa-b50788aba639-trusted-ca\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.943824 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qczc4\" (UniqueName: \"kubernetes.io/projected/633bcf87-3eb6-47af-a8fa-b50788aba639-kube-api-access-qczc4\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.943922 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/633bcf87-3eb6-47af-a8fa-b50788aba639-registry-certificates\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.943986 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/633bcf87-3eb6-47af-a8fa-b50788aba639-ca-trust-extracted\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.944018 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/633bcf87-3eb6-47af-a8fa-b50788aba639-registry-tls\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.944051 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/633bcf87-3eb6-47af-a8fa-b50788aba639-bound-sa-token\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.944087 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/633bcf87-3eb6-47af-a8fa-b50788aba639-installation-pull-secrets\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.944647 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/633bcf87-3eb6-47af-a8fa-b50788aba639-ca-trust-extracted\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.945316 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/633bcf87-3eb6-47af-a8fa-b50788aba639-registry-certificates\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.945770 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/633bcf87-3eb6-47af-a8fa-b50788aba639-trusted-ca\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.950856 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/633bcf87-3eb6-47af-a8fa-b50788aba639-installation-pull-secrets\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.950909 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/633bcf87-3eb6-47af-a8fa-b50788aba639-registry-tls\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.963855 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/633bcf87-3eb6-47af-a8fa-b50788aba639-bound-sa-token\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:55 crc kubenswrapper[4932]: I1125 08:56:55.964002 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qczc4\" (UniqueName: \"kubernetes.io/projected/633bcf87-3eb6-47af-a8fa-b50788aba639-kube-api-access-qczc4\") pod \"image-registry-66df7c8f76-js6tz\" (UID: \"633bcf87-3eb6-47af-a8fa-b50788aba639\") " pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:56 crc kubenswrapper[4932]: I1125 08:56:56.007120 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:56 crc kubenswrapper[4932]: I1125 08:56:56.231019 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-js6tz"] Nov 25 08:56:56 crc kubenswrapper[4932]: I1125 08:56:56.571929 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" event={"ID":"633bcf87-3eb6-47af-a8fa-b50788aba639","Type":"ContainerStarted","Data":"2faad1f58344ab031826f437fc44445fea527bf7252a9c872b1389c84fc20ed8"} Nov 25 08:56:56 crc kubenswrapper[4932]: I1125 08:56:56.572356 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:56:56 crc kubenswrapper[4932]: I1125 08:56:56.572372 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" event={"ID":"633bcf87-3eb6-47af-a8fa-b50788aba639","Type":"ContainerStarted","Data":"4120132e52e7868bacf9f079e60500f4ef4a0b18c59a831c5625d3ca80d40dfa"} Nov 25 08:56:56 crc kubenswrapper[4932]: I1125 08:56:56.596230 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" podStartSLOduration=1.5961831530000001 podStartE2EDuration="1.596183153s" podCreationTimestamp="2025-11-25 08:56:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 08:56:56.595962247 +0000 UTC m=+476.721991820" watchObservedRunningTime="2025-11-25 08:56:56.596183153 +0000 UTC m=+476.722212726" Nov 25 08:57:16 crc kubenswrapper[4932]: I1125 08:57:16.016059 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" Nov 25 08:57:16 crc kubenswrapper[4932]: I1125 08:57:16.085220 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-mgqrs"] Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.132243 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" podUID="dbba1e90-6d95-4837-b776-6a03a2e7901a" containerName="registry" containerID="cri-o://593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c" gracePeriod=30 Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.527542 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.713304 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-bound-sa-token\") pod \"dbba1e90-6d95-4837-b776-6a03a2e7901a\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.713406 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-registry-tls\") pod \"dbba1e90-6d95-4837-b776-6a03a2e7901a\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.713640 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"dbba1e90-6d95-4837-b776-6a03a2e7901a\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.713672 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dbba1e90-6d95-4837-b776-6a03a2e7901a-installation-pull-secrets\") pod \"dbba1e90-6d95-4837-b776-6a03a2e7901a\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.713696 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dbba1e90-6d95-4837-b776-6a03a2e7901a-ca-trust-extracted\") pod \"dbba1e90-6d95-4837-b776-6a03a2e7901a\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.714773 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8lfz\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-kube-api-access-t8lfz\") pod \"dbba1e90-6d95-4837-b776-6a03a2e7901a\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.714814 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dbba1e90-6d95-4837-b776-6a03a2e7901a-trusted-ca\") pod \"dbba1e90-6d95-4837-b776-6a03a2e7901a\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.714879 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dbba1e90-6d95-4837-b776-6a03a2e7901a-registry-certificates\") pod \"dbba1e90-6d95-4837-b776-6a03a2e7901a\" (UID: \"dbba1e90-6d95-4837-b776-6a03a2e7901a\") " Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.715392 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbba1e90-6d95-4837-b776-6a03a2e7901a-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "dbba1e90-6d95-4837-b776-6a03a2e7901a" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.715574 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbba1e90-6d95-4837-b776-6a03a2e7901a-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "dbba1e90-6d95-4837-b776-6a03a2e7901a" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.718616 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "dbba1e90-6d95-4837-b776-6a03a2e7901a" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.718675 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbba1e90-6d95-4837-b776-6a03a2e7901a-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "dbba1e90-6d95-4837-b776-6a03a2e7901a" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.718681 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-kube-api-access-t8lfz" (OuterVolumeSpecName: "kube-api-access-t8lfz") pod "dbba1e90-6d95-4837-b776-6a03a2e7901a" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a"). InnerVolumeSpecName "kube-api-access-t8lfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.719808 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "dbba1e90-6d95-4837-b776-6a03a2e7901a" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.734504 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbba1e90-6d95-4837-b776-6a03a2e7901a-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "dbba1e90-6d95-4837-b776-6a03a2e7901a" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.735886 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "dbba1e90-6d95-4837-b776-6a03a2e7901a" (UID: "dbba1e90-6d95-4837-b776-6a03a2e7901a"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.815965 4932 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/dbba1e90-6d95-4837-b776-6a03a2e7901a-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.816006 4932 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.816019 4932 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.816033 4932 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/dbba1e90-6d95-4837-b776-6a03a2e7901a-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.816045 4932 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/dbba1e90-6d95-4837-b776-6a03a2e7901a-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.816057 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8lfz\" (UniqueName: \"kubernetes.io/projected/dbba1e90-6d95-4837-b776-6a03a2e7901a-kube-api-access-t8lfz\") on node \"crc\" DevicePath \"\"" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.816068 4932 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/dbba1e90-6d95-4837-b776-6a03a2e7901a-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.839725 4932 generic.go:334] "Generic (PLEG): container finished" podID="dbba1e90-6d95-4837-b776-6a03a2e7901a" containerID="593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c" exitCode=0 Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.839780 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.839781 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" event={"ID":"dbba1e90-6d95-4837-b776-6a03a2e7901a","Type":"ContainerDied","Data":"593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c"} Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.839826 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-mgqrs" event={"ID":"dbba1e90-6d95-4837-b776-6a03a2e7901a","Type":"ContainerDied","Data":"162ad2a51412fc1989531220753a3c260d76a66da6d8dc1c1791ac4758d24f47"} Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.839847 4932 scope.go:117] "RemoveContainer" containerID="593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.862920 4932 scope.go:117] "RemoveContainer" containerID="593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c" Nov 25 08:57:41 crc kubenswrapper[4932]: E1125 08:57:41.866299 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c\": container with ID starting with 593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c not found: ID does not exist" containerID="593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.866372 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c"} err="failed to get container status \"593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c\": rpc error: code = NotFound desc = could not find container \"593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c\": container with ID starting with 593fecff445f316b52547bb3d70c7ae9c440f193f537c1ef55327d6a586a5a0c not found: ID does not exist" Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.873580 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-mgqrs"] Nov 25 08:57:41 crc kubenswrapper[4932]: I1125 08:57:41.880584 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-mgqrs"] Nov 25 08:57:42 crc kubenswrapper[4932]: I1125 08:57:42.619592 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbba1e90-6d95-4837-b776-6a03a2e7901a" path="/var/lib/kubelet/pods/dbba1e90-6d95-4837-b776-6a03a2e7901a/volumes" Nov 25 08:58:00 crc kubenswrapper[4932]: I1125 08:58:00.787635 4932 scope.go:117] "RemoveContainer" containerID="ed412641e3d1177ff3c499dcd0153ab70574d8210910cb45c9dcdae44f69e33f" Nov 25 08:58:07 crc kubenswrapper[4932]: I1125 08:58:07.181565 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 08:58:07 crc kubenswrapper[4932]: I1125 08:58:07.182488 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 08:58:37 crc kubenswrapper[4932]: I1125 08:58:37.180402 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 08:58:37 crc kubenswrapper[4932]: I1125 08:58:37.180825 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 08:59:07 crc kubenswrapper[4932]: I1125 08:59:07.180785 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 08:59:07 crc kubenswrapper[4932]: I1125 08:59:07.182269 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 08:59:07 crc kubenswrapper[4932]: I1125 08:59:07.182404 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 08:59:07 crc kubenswrapper[4932]: I1125 08:59:07.182898 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1014150dedd5450252a58cc05ec9112ff1e142db0b602232dc6a81197418f719"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 08:59:07 crc kubenswrapper[4932]: I1125 08:59:07.183020 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://1014150dedd5450252a58cc05ec9112ff1e142db0b602232dc6a81197418f719" gracePeriod=600 Nov 25 08:59:07 crc kubenswrapper[4932]: I1125 08:59:07.384070 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="1014150dedd5450252a58cc05ec9112ff1e142db0b602232dc6a81197418f719" exitCode=0 Nov 25 08:59:07 crc kubenswrapper[4932]: I1125 08:59:07.384115 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"1014150dedd5450252a58cc05ec9112ff1e142db0b602232dc6a81197418f719"} Nov 25 08:59:07 crc kubenswrapper[4932]: I1125 08:59:07.384145 4932 scope.go:117] "RemoveContainer" containerID="1be3115c0f5e9ad2a9965b1d4ca20e00cbc062a1f0c3346ce11cc851d5ae811c" Nov 25 08:59:08 crc kubenswrapper[4932]: I1125 08:59:08.389729 4932 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"de649297e499e1a80fc45537977d7092776afd0add46df5f77009f80ee0893ea"} Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.145735 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r"] Nov 25 09:00:00 crc kubenswrapper[4932]: E1125 09:00:00.147834 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbba1e90-6d95-4837-b776-6a03a2e7901a" containerName="registry" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.147919 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbba1e90-6d95-4837-b776-6a03a2e7901a" containerName="registry" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.148113 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbba1e90-6d95-4837-b776-6a03a2e7901a" containerName="registry" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.148694 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.151693 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.151782 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.159973 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r"] Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.264073 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4fb98ca2-1d66-4de8-b842-2cbf51c82530-secret-volume\") pod \"collect-profiles-29401020-6gx8r\" (UID: \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.264108 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwn6r\" (UniqueName: \"kubernetes.io/projected/4fb98ca2-1d66-4de8-b842-2cbf51c82530-kube-api-access-qwn6r\") pod \"collect-profiles-29401020-6gx8r\" (UID: \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.264128 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4fb98ca2-1d66-4de8-b842-2cbf51c82530-config-volume\") pod \"collect-profiles-29401020-6gx8r\" (UID: \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.365755 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4fb98ca2-1d66-4de8-b842-2cbf51c82530-secret-volume\") pod \"collect-profiles-29401020-6gx8r\" (UID: \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.365804 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwn6r\" (UniqueName: \"kubernetes.io/projected/4fb98ca2-1d66-4de8-b842-2cbf51c82530-kube-api-access-qwn6r\") pod \"collect-profiles-29401020-6gx8r\" (UID: \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.365827 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4fb98ca2-1d66-4de8-b842-2cbf51c82530-config-volume\") pod \"collect-profiles-29401020-6gx8r\" (UID: \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.366874 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4fb98ca2-1d66-4de8-b842-2cbf51c82530-config-volume\") pod \"collect-profiles-29401020-6gx8r\" (UID: \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.374388 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4fb98ca2-1d66-4de8-b842-2cbf51c82530-secret-volume\") pod \"collect-profiles-29401020-6gx8r\" (UID: \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.382724 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwn6r\" (UniqueName: \"kubernetes.io/projected/4fb98ca2-1d66-4de8-b842-2cbf51c82530-kube-api-access-qwn6r\") pod \"collect-profiles-29401020-6gx8r\" (UID: \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.467700 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:00 crc kubenswrapper[4932]: I1125 09:00:00.879181 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r"] Nov 25 09:00:01 crc kubenswrapper[4932]: I1125 09:00:01.707124 4932 generic.go:334] "Generic (PLEG): container finished" podID="4fb98ca2-1d66-4de8-b842-2cbf51c82530" containerID="0e1064c5d6796c880e8232396383ce88d1f8d0581350bca7a5bdab8ced5974a6" exitCode=0 Nov 25 09:00:01 crc kubenswrapper[4932]: I1125 09:00:01.707166 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" event={"ID":"4fb98ca2-1d66-4de8-b842-2cbf51c82530","Type":"ContainerDied","Data":"0e1064c5d6796c880e8232396383ce88d1f8d0581350bca7a5bdab8ced5974a6"} Nov 25 09:00:01 crc kubenswrapper[4932]: I1125 09:00:01.707454 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" event={"ID":"4fb98ca2-1d66-4de8-b842-2cbf51c82530","Type":"ContainerStarted","Data":"3ce370e5a97510e2777022ec707772cab1a59f669d8db1a3c94b63f2e76d37a5"} Nov 25 09:00:02 crc kubenswrapper[4932]: I1125 09:00:02.927468 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:03 crc kubenswrapper[4932]: I1125 09:00:03.097936 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4fb98ca2-1d66-4de8-b842-2cbf51c82530-secret-volume\") pod \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\" (UID: \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\") " Nov 25 09:00:03 crc kubenswrapper[4932]: I1125 09:00:03.097996 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwn6r\" (UniqueName: \"kubernetes.io/projected/4fb98ca2-1d66-4de8-b842-2cbf51c82530-kube-api-access-qwn6r\") pod \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\" (UID: \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\") " Nov 25 09:00:03 crc kubenswrapper[4932]: I1125 09:00:03.098106 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4fb98ca2-1d66-4de8-b842-2cbf51c82530-config-volume\") pod \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\" (UID: \"4fb98ca2-1d66-4de8-b842-2cbf51c82530\") " Nov 25 09:00:03 crc kubenswrapper[4932]: I1125 09:00:03.098970 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fb98ca2-1d66-4de8-b842-2cbf51c82530-config-volume" (OuterVolumeSpecName: "config-volume") pod "4fb98ca2-1d66-4de8-b842-2cbf51c82530" (UID: "4fb98ca2-1d66-4de8-b842-2cbf51c82530"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:00:03 crc kubenswrapper[4932]: I1125 09:00:03.099201 4932 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4fb98ca2-1d66-4de8-b842-2cbf51c82530-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:03 crc kubenswrapper[4932]: I1125 09:00:03.103103 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fb98ca2-1d66-4de8-b842-2cbf51c82530-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4fb98ca2-1d66-4de8-b842-2cbf51c82530" (UID: "4fb98ca2-1d66-4de8-b842-2cbf51c82530"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:00:03 crc kubenswrapper[4932]: I1125 09:00:03.103376 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fb98ca2-1d66-4de8-b842-2cbf51c82530-kube-api-access-qwn6r" (OuterVolumeSpecName: "kube-api-access-qwn6r") pod "4fb98ca2-1d66-4de8-b842-2cbf51c82530" (UID: "4fb98ca2-1d66-4de8-b842-2cbf51c82530"). InnerVolumeSpecName "kube-api-access-qwn6r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:00:03 crc kubenswrapper[4932]: I1125 09:00:03.200790 4932 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4fb98ca2-1d66-4de8-b842-2cbf51c82530-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:03 crc kubenswrapper[4932]: I1125 09:00:03.200825 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwn6r\" (UniqueName: \"kubernetes.io/projected/4fb98ca2-1d66-4de8-b842-2cbf51c82530-kube-api-access-qwn6r\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:03 crc kubenswrapper[4932]: I1125 09:00:03.720848 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" event={"ID":"4fb98ca2-1d66-4de8-b842-2cbf51c82530","Type":"ContainerDied","Data":"3ce370e5a97510e2777022ec707772cab1a59f669d8db1a3c94b63f2e76d37a5"} Nov 25 09:00:03 crc kubenswrapper[4932]: I1125 09:00:03.721252 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ce370e5a97510e2777022ec707772cab1a59f669d8db1a3c94b63f2e76d37a5" Nov 25 09:00:03 crc kubenswrapper[4932]: I1125 09:00:03.720922 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r" Nov 25 09:00:23 crc kubenswrapper[4932]: I1125 09:00:23.515020 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rlhks"] Nov 25 09:00:23 crc kubenswrapper[4932]: I1125 09:00:23.515966 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovn-controller" containerID="cri-o://6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92" gracePeriod=30 Nov 25 09:00:23 crc kubenswrapper[4932]: I1125 09:00:23.516076 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="kube-rbac-proxy-node" containerID="cri-o://f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464" gracePeriod=30 Nov 25 09:00:23 crc kubenswrapper[4932]: I1125 09:00:23.516038 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="nbdb" containerID="cri-o://6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a" gracePeriod=30 Nov 25 09:00:23 crc kubenswrapper[4932]: I1125 09:00:23.516113 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovn-acl-logging" containerID="cri-o://bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0" gracePeriod=30 Nov 25 09:00:23 crc kubenswrapper[4932]: I1125 09:00:23.516090 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8" gracePeriod=30 Nov 25 09:00:23 crc kubenswrapper[4932]: I1125 09:00:23.516136 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="northd" containerID="cri-o://2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e" gracePeriod=30 Nov 25 09:00:23 crc kubenswrapper[4932]: I1125 09:00:23.516222 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="sbdb" containerID="cri-o://637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd" gracePeriod=30 Nov 25 09:00:23 crc kubenswrapper[4932]: I1125 09:00:23.565082 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" containerID="cri-o://5149c5d917ba8bbafe5c58c0bec0047288114ac8df493b6958cff2293f98d2b9" gracePeriod=30 Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.471373 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kvhb4_199dbdf9-e2fc-459e-9e17-f5d520309f0a/kube-multus/2.log" Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.472260 4932 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-multus_multus-kvhb4_199dbdf9-e2fc-459e-9e17-f5d520309f0a/kube-multus/1.log" Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.472462 4932 generic.go:334] "Generic (PLEG): container finished" podID="199dbdf9-e2fc-459e-9e17-f5d520309f0a" containerID="f45a3bd992f34b9cbe79f81c0d4c5cd880a266d1454be0b6ac82a7d3365272b1" exitCode=2 Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.472526 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kvhb4" event={"ID":"199dbdf9-e2fc-459e-9e17-f5d520309f0a","Type":"ContainerDied","Data":"f45a3bd992f34b9cbe79f81c0d4c5cd880a266d1454be0b6ac82a7d3365272b1"} Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.472737 4932 scope.go:117] "RemoveContainer" containerID="154dcd3feae41470aa678f3bfdfae9a5a4af769b14c800d21e37351835697115" Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.473524 4932 scope.go:117] "RemoveContainer" containerID="f45a3bd992f34b9cbe79f81c0d4c5cd880a266d1454be0b6ac82a7d3365272b1" Nov 25 09:00:24 crc kubenswrapper[4932]: E1125 09:00:24.473937 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-kvhb4_openshift-multus(199dbdf9-e2fc-459e-9e17-f5d520309f0a)\"" pod="openshift-multus/multus-kvhb4" podUID="199dbdf9-e2fc-459e-9e17-f5d520309f0a" Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.482761 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovnkube-controller/3.log" Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.485896 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovn-acl-logging/0.log" Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486351 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovn-controller/0.log" Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486694 4932 generic.go:334] "Generic (PLEG): container finished" podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="5149c5d917ba8bbafe5c58c0bec0047288114ac8df493b6958cff2293f98d2b9" exitCode=0 Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486715 4932 generic.go:334] "Generic (PLEG): container finished" podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd" exitCode=0 Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486722 4932 generic.go:334] "Generic (PLEG): container finished" podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a" exitCode=0 Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486729 4932 generic.go:334] "Generic (PLEG): container finished" podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e" exitCode=0 Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486735 4932 generic.go:334] "Generic (PLEG): container finished" podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8" exitCode=0 Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486741 4932 generic.go:334] "Generic (PLEG): container finished" 
podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464" exitCode=0 Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486746 4932 generic.go:334] "Generic (PLEG): container finished" podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0" exitCode=143 Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486752 4932 generic.go:334] "Generic (PLEG): container finished" podID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerID="6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92" exitCode=143 Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486771 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"5149c5d917ba8bbafe5c58c0bec0047288114ac8df493b6958cff2293f98d2b9"} Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486794 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd"} Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486803 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a"} Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486811 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e"} Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486819 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8"} Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486827 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464"} Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486837 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0"} Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.486845 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92"} Nov 25 09:00:24 crc kubenswrapper[4932]: I1125 09:00:24.590724 4932 scope.go:117] "RemoveContainer" containerID="0eb4d3f170aa77093e2e16c302e2e703c6aa2061c8c6c6749b5ef86798ec44c6" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.017807 4932 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovn-acl-logging/0.log" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.018771 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovn-controller/0.log" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.019397 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.081704 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-nzsn5"] Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.081980 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="northd" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.081996 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="northd" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082010 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="nbdb" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082019 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="nbdb" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082034 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="sbdb" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082043 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="sbdb" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082054 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovn-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082062 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovn-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082073 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082081 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082092 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fb98ca2-1d66-4de8-b842-2cbf51c82530" containerName="collect-profiles" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082101 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fb98ca2-1d66-4de8-b842-2cbf51c82530" containerName="collect-profiles" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082111 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082119 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082129 4932 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082137 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082147 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082157 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082167 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="kubecfg-setup" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082175 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="kubecfg-setup" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082221 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="kube-rbac-proxy-node" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082233 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="kube-rbac-proxy-node" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082252 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovn-acl-logging" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082264 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovn-acl-logging" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082277 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082288 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: E1125 09:00:25.082299 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082309 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082437 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082451 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="northd" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082460 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="sbdb" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082469 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fb98ca2-1d66-4de8-b842-2cbf51c82530" containerName="collect-profiles" Nov 25 09:00:25 crc 
kubenswrapper[4932]: I1125 09:00:25.082480 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082491 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="nbdb" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082502 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082510 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082519 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovn-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082529 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovn-acl-logging" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082541 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="kube-rbac-proxy-node" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082562 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.082790 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" containerName="ovnkube-controller" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.085369 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.213681 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-var-lib-cni-networks-ovn-kubernetes\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.213795 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-cni-netd\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.213818 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-run-netns\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.213871 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/24f5eec6-6332-4bae-bce3-4faa1156c249-ovn-node-metrics-cert\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.213896 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-etc-openvswitch\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.213947 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-ovnkube-script-lib\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.213968 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-node-log\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214035 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-ovn\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214062 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-var-lib-openvswitch\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214068 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-run-netns" 
(OuterVolumeSpecName: "host-run-netns") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214101 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-ovnkube-config\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214129 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-cni-bin\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214150 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-kubelet\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214273 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-systemd-units\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214297 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-slash\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214339 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-env-overrides\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214364 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-systemd\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214386 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-openvswitch\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214431 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2h2l2\" (UniqueName: \"kubernetes.io/projected/24f5eec6-6332-4bae-bce3-4faa1156c249-kube-api-access-2h2l2\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214456 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-run-ovn-kubernetes\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214478 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-log-socket\") pod \"24f5eec6-6332-4bae-bce3-4faa1156c249\" (UID: \"24f5eec6-6332-4bae-bce3-4faa1156c249\") " Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214608 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-etc-openvswitch\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214637 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-run-openvswitch\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214674 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-ovnkube-config\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214702 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-cni-netd\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214719 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-kubelet\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214758 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-ovn-node-metrics-cert\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214774 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-run-ovn-kubernetes\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214793 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-run-ovn\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214830 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-run-systemd\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214845 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214861 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-systemd-units\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214878 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-env-overrides\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214908 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-run-netns\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214924 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-node-log\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214939 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5htgk\" (UniqueName: \"kubernetes.io/projected/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-kube-api-access-5htgk\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214957 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-cni-bin\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214985 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-log-socket\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215011 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-ovnkube-script-lib\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215025 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-var-lib-openvswitch\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215041 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-slash\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215086 4932 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214125 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214151 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.214183 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215149 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215161 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215242 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-node-log" (OuterVolumeSpecName: "node-log") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215276 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215415 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215464 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215484 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215541 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-slash" (OuterVolumeSpecName: "host-slash") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215565 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215593 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-log-socket" (OuterVolumeSpecName: "log-socket") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215596 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215910 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.215947 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.231918 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24f5eec6-6332-4bae-bce3-4faa1156c249-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.231902 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24f5eec6-6332-4bae-bce3-4faa1156c249-kube-api-access-2h2l2" (OuterVolumeSpecName: "kube-api-access-2h2l2") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). InnerVolumeSpecName "kube-api-access-2h2l2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.241866 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "24f5eec6-6332-4bae-bce3-4faa1156c249" (UID: "24f5eec6-6332-4bae-bce3-4faa1156c249"). 
InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316361 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-cni-netd\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316419 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-kubelet\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316442 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-ovn-node-metrics-cert\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316469 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-run-ovn-kubernetes\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316503 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-run-ovn\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316535 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-run-systemd\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316558 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316579 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-systemd-units\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316602 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-env-overrides\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316623 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-node-log\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316643 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-run-netns\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316663 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5htgk\" (UniqueName: \"kubernetes.io/projected/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-kube-api-access-5htgk\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316687 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-log-socket\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316706 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-cni-bin\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316738 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-var-lib-openvswitch\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316767 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-ovnkube-script-lib\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316789 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-slash\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316816 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-etc-openvswitch\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 
09:00:25.316843 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-run-openvswitch\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316870 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-ovnkube-config\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316922 4932 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316937 4932 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-node-log\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316949 4932 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316961 4932 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316972 4932 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316983 4932 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.316995 4932 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317006 4932 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317017 4932 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-slash\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317028 4932 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/24f5eec6-6332-4bae-bce3-4faa1156c249-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317039 4932 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317049 4932 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317063 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2h2l2\" (UniqueName: \"kubernetes.io/projected/24f5eec6-6332-4bae-bce3-4faa1156c249-kube-api-access-2h2l2\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317075 4932 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317085 4932 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-log-socket\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317097 4932 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317109 4932 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317121 4932 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/24f5eec6-6332-4bae-bce3-4faa1156c249-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317133 4932 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/24f5eec6-6332-4bae-bce3-4faa1156c249-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.317961 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-ovnkube-config\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.318058 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-cni-netd\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.318090 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-kubelet\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.321466 4932 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-ovn-node-metrics-cert\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.321545 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-run-ovn-kubernetes\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.321575 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-run-ovn\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.321598 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-run-systemd\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.321622 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.321651 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-systemd-units\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.322003 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-env-overrides\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.322049 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-node-log\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.322092 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-run-netns\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.323764 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-log-socket\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.323828 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-cni-bin\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.323882 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-var-lib-openvswitch\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.323917 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-etc-openvswitch\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.323948 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-host-slash\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.323987 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-run-openvswitch\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.324359 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-ovnkube-script-lib\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.339024 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5htgk\" (UniqueName: \"kubernetes.io/projected/6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1-kube-api-access-5htgk\") pod \"ovnkube-node-nzsn5\" (UID: \"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1\") " pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.404993 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.496696 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovn-acl-logging/0.log" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.497101 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-rlhks_24f5eec6-6332-4bae-bce3-4faa1156c249/ovn-controller/0.log" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.497425 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" event={"ID":"24f5eec6-6332-4bae-bce3-4faa1156c249","Type":"ContainerDied","Data":"7d41e285bee6cb58a899bfc23d9145c2061aba51c0d0a0adb738007d0f29c5f8"} Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.497465 4932 scope.go:117] "RemoveContainer" containerID="5149c5d917ba8bbafe5c58c0bec0047288114ac8df493b6958cff2293f98d2b9" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.497593 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rlhks" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.503022 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" event={"ID":"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1","Type":"ContainerStarted","Data":"1da4eb5b338cfd8c5b72517a6103e144efd14f9a2afa1d61f0f7ed0961fa0ffc"} Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.504886 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kvhb4_199dbdf9-e2fc-459e-9e17-f5d520309f0a/kube-multus/2.log" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.518153 4932 scope.go:117] "RemoveContainer" containerID="637b00cc4c64cee4b7eeac61c9f1e9c0d6309bd258b67323c27538b7f04d9bfd" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.546517 4932 scope.go:117] "RemoveContainer" containerID="6ae1985f1cede978c603f0ab8ee6164bb21552931dca2e7b95431d74a4d7089a" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.548641 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rlhks"] Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.553161 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rlhks"] Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.560444 4932 scope.go:117] "RemoveContainer" containerID="2f41c80e95d49a49d5134f89e4d9e0e591e8b0929771d7653fbc5504296be49e" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.571898 4932 scope.go:117] "RemoveContainer" containerID="2d8865e94cfbfb6efa98bb9914588346d59f4dc5c0479faf8b9847aa9f5a4dd8" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.582329 4932 scope.go:117] "RemoveContainer" containerID="f199bc7e4ab011c90b86f7538596436f2b380ae5f3a469d4da02c1fd5d224464" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.595477 4932 scope.go:117] "RemoveContainer" containerID="bc28d5b0513a74848e6158efac079656b3f4a0fb1fdc6fc587175e2f26ea39c0" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.607258 4932 scope.go:117] "RemoveContainer" containerID="6ff3acbccefe1126b20945395d81f856d6e08701e11ae6ee0a0c4114325fef92" Nov 25 09:00:25 crc kubenswrapper[4932]: I1125 09:00:25.619135 4932 scope.go:117] "RemoveContainer" containerID="e8c3166f03ed386ad682d8794569033468bc5d089a6cfd741ff2a906fed84136" Nov 25 09:00:26 crc 
kubenswrapper[4932]: I1125 09:00:26.511709 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" event={"ID":"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1","Type":"ContainerDied","Data":"f628ade0f03375e36d547d6123fbc32b34214642c0ba384111d77458924a7354"} Nov 25 09:00:26 crc kubenswrapper[4932]: I1125 09:00:26.511513 4932 generic.go:334] "Generic (PLEG): container finished" podID="6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1" containerID="f628ade0f03375e36d547d6123fbc32b34214642c0ba384111d77458924a7354" exitCode=0 Nov 25 09:00:26 crc kubenswrapper[4932]: I1125 09:00:26.611948 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24f5eec6-6332-4bae-bce3-4faa1156c249" path="/var/lib/kubelet/pods/24f5eec6-6332-4bae-bce3-4faa1156c249/volumes" Nov 25 09:00:27 crc kubenswrapper[4932]: I1125 09:00:27.521282 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" event={"ID":"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1","Type":"ContainerStarted","Data":"5c66c7fe2460a0990178b7cec08e045907e44ce0ec2dc3a82a150c2c4523e585"} Nov 25 09:00:27 crc kubenswrapper[4932]: I1125 09:00:27.521678 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" event={"ID":"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1","Type":"ContainerStarted","Data":"bf624dc46ead8fd972712f1a347370999132f577bbbb3060e63d0ea49fd26f9d"} Nov 25 09:00:27 crc kubenswrapper[4932]: I1125 09:00:27.521698 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" event={"ID":"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1","Type":"ContainerStarted","Data":"5cddf9acf45641d674eb6b5ded73874677c1d06c98a0f69a305e655e37f5606d"} Nov 25 09:00:27 crc kubenswrapper[4932]: I1125 09:00:27.968872 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-hjclb"] Nov 25 09:00:27 crc kubenswrapper[4932]: I1125 09:00:27.970094 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.030808 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.030891 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.031126 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.031206 4932 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-vqtxm" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.153781 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/20306d8c-9042-4cc0-9957-6b45a2c58762-crc-storage\") pod \"crc-storage-crc-hjclb\" (UID: \"20306d8c-9042-4cc0-9957-6b45a2c58762\") " pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.153822 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/20306d8c-9042-4cc0-9957-6b45a2c58762-node-mnt\") pod \"crc-storage-crc-hjclb\" (UID: \"20306d8c-9042-4cc0-9957-6b45a2c58762\") " pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.154038 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lblhp\" (UniqueName: \"kubernetes.io/projected/20306d8c-9042-4cc0-9957-6b45a2c58762-kube-api-access-lblhp\") pod \"crc-storage-crc-hjclb\" (UID: \"20306d8c-9042-4cc0-9957-6b45a2c58762\") " pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.255200 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lblhp\" (UniqueName: \"kubernetes.io/projected/20306d8c-9042-4cc0-9957-6b45a2c58762-kube-api-access-lblhp\") pod \"crc-storage-crc-hjclb\" (UID: \"20306d8c-9042-4cc0-9957-6b45a2c58762\") " pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.255293 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/20306d8c-9042-4cc0-9957-6b45a2c58762-crc-storage\") pod \"crc-storage-crc-hjclb\" (UID: \"20306d8c-9042-4cc0-9957-6b45a2c58762\") " pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.255318 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/20306d8c-9042-4cc0-9957-6b45a2c58762-node-mnt\") pod \"crc-storage-crc-hjclb\" (UID: \"20306d8c-9042-4cc0-9957-6b45a2c58762\") " pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.255605 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/20306d8c-9042-4cc0-9957-6b45a2c58762-node-mnt\") pod \"crc-storage-crc-hjclb\" (UID: \"20306d8c-9042-4cc0-9957-6b45a2c58762\") " pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.256501 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"crc-storage\" (UniqueName: \"kubernetes.io/configmap/20306d8c-9042-4cc0-9957-6b45a2c58762-crc-storage\") pod \"crc-storage-crc-hjclb\" (UID: \"20306d8c-9042-4cc0-9957-6b45a2c58762\") " pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.272767 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lblhp\" (UniqueName: \"kubernetes.io/projected/20306d8c-9042-4cc0-9957-6b45a2c58762-kube-api-access-lblhp\") pod \"crc-storage-crc-hjclb\" (UID: \"20306d8c-9042-4cc0-9957-6b45a2c58762\") " pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.346451 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: E1125 09:00:28.369280 4932 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-hjclb_crc-storage_20306d8c-9042-4cc0-9957-6b45a2c58762_0(8747004d6132cfa5a732a318039be63f434dff2109d0c74b49800395545068c5): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:00:28 crc kubenswrapper[4932]: E1125 09:00:28.369365 4932 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-hjclb_crc-storage_20306d8c-9042-4cc0-9957-6b45a2c58762_0(8747004d6132cfa5a732a318039be63f434dff2109d0c74b49800395545068c5): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: E1125 09:00:28.369388 4932 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-hjclb_crc-storage_20306d8c-9042-4cc0-9957-6b45a2c58762_0(8747004d6132cfa5a732a318039be63f434dff2109d0c74b49800395545068c5): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:28 crc kubenswrapper[4932]: E1125 09:00:28.369434 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-hjclb_crc-storage(20306d8c-9042-4cc0-9957-6b45a2c58762)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-hjclb_crc-storage(20306d8c-9042-4cc0-9957-6b45a2c58762)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-hjclb_crc-storage_20306d8c-9042-4cc0-9957-6b45a2c58762_0(8747004d6132cfa5a732a318039be63f434dff2109d0c74b49800395545068c5): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\"" pod="crc-storage/crc-storage-crc-hjclb" podUID="20306d8c-9042-4cc0-9957-6b45a2c58762" Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.530040 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" event={"ID":"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1","Type":"ContainerStarted","Data":"f0fa6072f2fe210165e14baab39002958f06091741ca50d5228a37081ca69b29"} Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.530089 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" event={"ID":"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1","Type":"ContainerStarted","Data":"3eb9c2c07333c1ce465d587fe7dee5d883e5ba824483382722508ac33ae382a8"} Nov 25 09:00:28 crc kubenswrapper[4932]: I1125 09:00:28.530103 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" event={"ID":"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1","Type":"ContainerStarted","Data":"91d75e2c750252d9f37da65116818b68d7861b3463f9741dbae739fca1f4a4b8"} Nov 25 09:00:30 crc kubenswrapper[4932]: I1125 09:00:30.542968 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" event={"ID":"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1","Type":"ContainerStarted","Data":"3dff6ac6bebb7f4a2eda6a1259b22e5c16a472eaa0b232c9af13457d8d54dd75"} Nov 25 09:00:33 crc kubenswrapper[4932]: I1125 09:00:33.304343 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-hjclb"] Nov 25 09:00:33 crc kubenswrapper[4932]: I1125 09:00:33.305046 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:33 crc kubenswrapper[4932]: I1125 09:00:33.305596 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:33 crc kubenswrapper[4932]: E1125 09:00:33.330293 4932 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-hjclb_crc-storage_20306d8c-9042-4cc0-9957-6b45a2c58762_0(92361b3a53faa6f687de46ea8842b7f480f0b20d33cb2f4510db632f6a150c87): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:00:33 crc kubenswrapper[4932]: E1125 09:00:33.330364 4932 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-hjclb_crc-storage_20306d8c-9042-4cc0-9957-6b45a2c58762_0(92361b3a53faa6f687de46ea8842b7f480f0b20d33cb2f4510db632f6a150c87): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:33 crc kubenswrapper[4932]: E1125 09:00:33.330386 4932 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-hjclb_crc-storage_20306d8c-9042-4cc0-9957-6b45a2c58762_0(92361b3a53faa6f687de46ea8842b7f480f0b20d33cb2f4510db632f6a150c87): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:33 crc kubenswrapper[4932]: E1125 09:00:33.330434 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-hjclb_crc-storage(20306d8c-9042-4cc0-9957-6b45a2c58762)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-hjclb_crc-storage(20306d8c-9042-4cc0-9957-6b45a2c58762)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-hjclb_crc-storage_20306d8c-9042-4cc0-9957-6b45a2c58762_0(92361b3a53faa6f687de46ea8842b7f480f0b20d33cb2f4510db632f6a150c87): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-hjclb" podUID="20306d8c-9042-4cc0-9957-6b45a2c58762" Nov 25 09:00:33 crc kubenswrapper[4932]: I1125 09:00:33.559342 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" event={"ID":"6ec3e9ee-7c13-4155-b1cf-0d0f737a12d1","Type":"ContainerStarted","Data":"b9d3941b14b4de932b6ddeb88ea9f5a2902c783a6e507200f408a619b632563d"} Nov 25 09:00:33 crc kubenswrapper[4932]: I1125 09:00:33.560285 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:33 crc kubenswrapper[4932]: I1125 09:00:33.560340 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:33 crc kubenswrapper[4932]: I1125 09:00:33.560383 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:33 crc kubenswrapper[4932]: I1125 09:00:33.587330 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:33 crc kubenswrapper[4932]: I1125 09:00:33.588121 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:33 crc kubenswrapper[4932]: I1125 09:00:33.589320 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" podStartSLOduration=8.589307737 podStartE2EDuration="8.589307737s" podCreationTimestamp="2025-11-25 09:00:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:00:33.586390786 +0000 UTC m=+693.712420359" watchObservedRunningTime="2025-11-25 09:00:33.589307737 +0000 UTC m=+693.715337300" Nov 25 09:00:35 crc kubenswrapper[4932]: I1125 09:00:35.606429 4932 scope.go:117] "RemoveContainer" containerID="f45a3bd992f34b9cbe79f81c0d4c5cd880a266d1454be0b6ac82a7d3365272b1" Nov 25 09:00:35 crc kubenswrapper[4932]: E1125 09:00:35.606947 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-kvhb4_openshift-multus(199dbdf9-e2fc-459e-9e17-f5d520309f0a)\"" pod="openshift-multus/multus-kvhb4" podUID="199dbdf9-e2fc-459e-9e17-f5d520309f0a" Nov 25 09:00:46 crc kubenswrapper[4932]: I1125 09:00:46.605711 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:46 crc kubenswrapper[4932]: I1125 09:00:46.608289 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:46 crc kubenswrapper[4932]: E1125 09:00:46.648934 4932 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-hjclb_crc-storage_20306d8c-9042-4cc0-9957-6b45a2c58762_0(635de6b37afe5bb3a93115689fd3e58c80f24ab2f9a823390f9bfcd5f584bbb6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 09:00:46 crc kubenswrapper[4932]: E1125 09:00:46.649000 4932 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-hjclb_crc-storage_20306d8c-9042-4cc0-9957-6b45a2c58762_0(635de6b37afe5bb3a93115689fd3e58c80f24ab2f9a823390f9bfcd5f584bbb6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:46 crc kubenswrapper[4932]: E1125 09:00:46.649024 4932 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-hjclb_crc-storage_20306d8c-9042-4cc0-9957-6b45a2c58762_0(635de6b37afe5bb3a93115689fd3e58c80f24ab2f9a823390f9bfcd5f584bbb6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:46 crc kubenswrapper[4932]: E1125 09:00:46.649078 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-hjclb_crc-storage(20306d8c-9042-4cc0-9957-6b45a2c58762)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-hjclb_crc-storage(20306d8c-9042-4cc0-9957-6b45a2c58762)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-hjclb_crc-storage_20306d8c-9042-4cc0-9957-6b45a2c58762_0(635de6b37afe5bb3a93115689fd3e58c80f24ab2f9a823390f9bfcd5f584bbb6): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-hjclb" podUID="20306d8c-9042-4cc0-9957-6b45a2c58762" Nov 25 09:00:49 crc kubenswrapper[4932]: I1125 09:00:49.606483 4932 scope.go:117] "RemoveContainer" containerID="f45a3bd992f34b9cbe79f81c0d4c5cd880a266d1454be0b6ac82a7d3365272b1" Nov 25 09:00:50 crc kubenswrapper[4932]: I1125 09:00:50.661078 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-kvhb4_199dbdf9-e2fc-459e-9e17-f5d520309f0a/kube-multus/2.log" Nov 25 09:00:50 crc kubenswrapper[4932]: I1125 09:00:50.661459 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-kvhb4" event={"ID":"199dbdf9-e2fc-459e-9e17-f5d520309f0a","Type":"ContainerStarted","Data":"a1a3c81be08b4cbf97e439919878c6a437b8bdc37736cd7e42d166435a50a2e5"} Nov 25 09:00:55 crc kubenswrapper[4932]: I1125 09:00:55.430285 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nzsn5" Nov 25 09:00:59 crc kubenswrapper[4932]: I1125 09:00:59.605601 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:59 crc kubenswrapper[4932]: I1125 09:00:59.606449 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:00:59 crc kubenswrapper[4932]: I1125 09:00:59.998605 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-hjclb"] Nov 25 09:01:00 crc kubenswrapper[4932]: I1125 09:01:00.007435 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:01:00 crc kubenswrapper[4932]: I1125 09:01:00.732534 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-hjclb" event={"ID":"20306d8c-9042-4cc0-9957-6b45a2c58762","Type":"ContainerStarted","Data":"3acead65d9cd21d2ca5a423399120e8f569b778aa16191c96ef61d4908cba106"} Nov 25 09:01:01 crc kubenswrapper[4932]: I1125 09:01:01.738833 4932 generic.go:334] "Generic (PLEG): container finished" podID="20306d8c-9042-4cc0-9957-6b45a2c58762" containerID="89c42f30eb26d38b0d42182d861713eb3e568b7805d21757f8c81cb7d45f640f" exitCode=0 Nov 25 09:01:01 crc kubenswrapper[4932]: I1125 09:01:01.738896 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-hjclb" event={"ID":"20306d8c-9042-4cc0-9957-6b45a2c58762","Type":"ContainerDied","Data":"89c42f30eb26d38b0d42182d861713eb3e568b7805d21757f8c81cb7d45f640f"} Nov 25 09:01:02 crc kubenswrapper[4932]: I1125 09:01:02.949651 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:01:03 crc kubenswrapper[4932]: I1125 09:01:03.090812 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/20306d8c-9042-4cc0-9957-6b45a2c58762-crc-storage\") pod \"20306d8c-9042-4cc0-9957-6b45a2c58762\" (UID: \"20306d8c-9042-4cc0-9957-6b45a2c58762\") " Nov 25 09:01:03 crc kubenswrapper[4932]: I1125 09:01:03.090917 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/20306d8c-9042-4cc0-9957-6b45a2c58762-node-mnt\") pod \"20306d8c-9042-4cc0-9957-6b45a2c58762\" (UID: \"20306d8c-9042-4cc0-9957-6b45a2c58762\") " Nov 25 09:01:03 crc kubenswrapper[4932]: I1125 09:01:03.090960 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lblhp\" (UniqueName: \"kubernetes.io/projected/20306d8c-9042-4cc0-9957-6b45a2c58762-kube-api-access-lblhp\") pod \"20306d8c-9042-4cc0-9957-6b45a2c58762\" (UID: \"20306d8c-9042-4cc0-9957-6b45a2c58762\") " Nov 25 09:01:03 crc kubenswrapper[4932]: I1125 09:01:03.091021 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/20306d8c-9042-4cc0-9957-6b45a2c58762-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "20306d8c-9042-4cc0-9957-6b45a2c58762" (UID: "20306d8c-9042-4cc0-9957-6b45a2c58762"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:01:03 crc kubenswrapper[4932]: I1125 09:01:03.091392 4932 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/20306d8c-9042-4cc0-9957-6b45a2c58762-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:03 crc kubenswrapper[4932]: I1125 09:01:03.097736 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20306d8c-9042-4cc0-9957-6b45a2c58762-kube-api-access-lblhp" (OuterVolumeSpecName: "kube-api-access-lblhp") pod "20306d8c-9042-4cc0-9957-6b45a2c58762" (UID: "20306d8c-9042-4cc0-9957-6b45a2c58762"). 
InnerVolumeSpecName "kube-api-access-lblhp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:01:03 crc kubenswrapper[4932]: I1125 09:01:03.104931 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20306d8c-9042-4cc0-9957-6b45a2c58762-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "20306d8c-9042-4cc0-9957-6b45a2c58762" (UID: "20306d8c-9042-4cc0-9957-6b45a2c58762"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:01:03 crc kubenswrapper[4932]: I1125 09:01:03.192080 4932 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/20306d8c-9042-4cc0-9957-6b45a2c58762-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:03 crc kubenswrapper[4932]: I1125 09:01:03.192123 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lblhp\" (UniqueName: \"kubernetes.io/projected/20306d8c-9042-4cc0-9957-6b45a2c58762-kube-api-access-lblhp\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:03 crc kubenswrapper[4932]: I1125 09:01:03.750315 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-hjclb" event={"ID":"20306d8c-9042-4cc0-9957-6b45a2c58762","Type":"ContainerDied","Data":"3acead65d9cd21d2ca5a423399120e8f569b778aa16191c96ef61d4908cba106"} Nov 25 09:01:03 crc kubenswrapper[4932]: I1125 09:01:03.750361 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3acead65d9cd21d2ca5a423399120e8f569b778aa16191c96ef61d4908cba106" Nov 25 09:01:03 crc kubenswrapper[4932]: I1125 09:01:03.750373 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-hjclb" Nov 25 09:01:07 crc kubenswrapper[4932]: I1125 09:01:07.180753 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:01:07 crc kubenswrapper[4932]: I1125 09:01:07.181111 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:01:09 crc kubenswrapper[4932]: I1125 09:01:09.962985 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7"] Nov 25 09:01:09 crc kubenswrapper[4932]: E1125 09:01:09.963645 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20306d8c-9042-4cc0-9957-6b45a2c58762" containerName="storage" Nov 25 09:01:09 crc kubenswrapper[4932]: I1125 09:01:09.963664 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="20306d8c-9042-4cc0-9957-6b45a2c58762" containerName="storage" Nov 25 09:01:09 crc kubenswrapper[4932]: I1125 09:01:09.963775 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="20306d8c-9042-4cc0-9957-6b45a2c58762" containerName="storage" Nov 25 09:01:09 crc kubenswrapper[4932]: I1125 09:01:09.964642 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:09 crc kubenswrapper[4932]: I1125 09:01:09.966888 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 09:01:09 crc kubenswrapper[4932]: I1125 09:01:09.976627 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7"] Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.081417 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7\" (UID: \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.081522 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjtdf\" (UniqueName: \"kubernetes.io/projected/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-kube-api-access-cjtdf\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7\" (UID: \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.081661 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7\" (UID: \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.182841 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7\" (UID: \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.182941 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7\" (UID: \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.182993 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjtdf\" (UniqueName: \"kubernetes.io/projected/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-kube-api-access-cjtdf\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7\" (UID: \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.183492 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7\" (UID: \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.183881 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7\" (UID: \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.206306 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjtdf\" (UniqueName: \"kubernetes.io/projected/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-kube-api-access-cjtdf\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7\" (UID: \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.280926 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.477501 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7"] Nov 25 09:01:10 crc kubenswrapper[4932]: W1125 09:01:10.488381 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7e3d9b1d_1241_4fef_9234_2dac1c0dc4b4.slice/crio-6950381cadc31184b1db0ddca2231388bad8297b308ce04f703fe96237e91c3a WatchSource:0}: Error finding container 6950381cadc31184b1db0ddca2231388bad8297b308ce04f703fe96237e91c3a: Status 404 returned error can't find the container with id 6950381cadc31184b1db0ddca2231388bad8297b308ce04f703fe96237e91c3a Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.786037 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" event={"ID":"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4","Type":"ContainerStarted","Data":"43411ac029df6b62f5e9a92c95ef4ce585a6abd610f5875fa112aa497d7077e5"} Nov 25 09:01:10 crc kubenswrapper[4932]: I1125 09:01:10.786537 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" event={"ID":"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4","Type":"ContainerStarted","Data":"6950381cadc31184b1db0ddca2231388bad8297b308ce04f703fe96237e91c3a"} Nov 25 09:01:11 crc kubenswrapper[4932]: I1125 09:01:11.793647 4932 generic.go:334] "Generic (PLEG): container finished" podID="7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" containerID="43411ac029df6b62f5e9a92c95ef4ce585a6abd610f5875fa112aa497d7077e5" exitCode=0 Nov 25 09:01:11 crc kubenswrapper[4932]: I1125 09:01:11.793691 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" event={"ID":"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4","Type":"ContainerDied","Data":"43411ac029df6b62f5e9a92c95ef4ce585a6abd610f5875fa112aa497d7077e5"} Nov 25 09:01:13 crc 
kubenswrapper[4932]: I1125 09:01:13.805976 4932 generic.go:334] "Generic (PLEG): container finished" podID="7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" containerID="2127b42a957be7654458011216ac0b74fdbd25664df0945cd5f785e81ae3a809" exitCode=0 Nov 25 09:01:13 crc kubenswrapper[4932]: I1125 09:01:13.806051 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" event={"ID":"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4","Type":"ContainerDied","Data":"2127b42a957be7654458011216ac0b74fdbd25664df0945cd5f785e81ae3a809"} Nov 25 09:01:14 crc kubenswrapper[4932]: I1125 09:01:14.813425 4932 generic.go:334] "Generic (PLEG): container finished" podID="7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" containerID="6dad9a9811b3e49794fed258662cda0350bbf4ca59f8e20f8feb5c655277bb80" exitCode=0 Nov 25 09:01:14 crc kubenswrapper[4932]: I1125 09:01:14.813633 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" event={"ID":"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4","Type":"ContainerDied","Data":"6dad9a9811b3e49794fed258662cda0350bbf4ca59f8e20f8feb5c655277bb80"} Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.119330 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.309397 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjtdf\" (UniqueName: \"kubernetes.io/projected/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-kube-api-access-cjtdf\") pod \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\" (UID: \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\") " Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.309539 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-bundle\") pod \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\" (UID: \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\") " Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.309579 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-util\") pod \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\" (UID: \"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4\") " Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.310170 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-bundle" (OuterVolumeSpecName: "bundle") pod "7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" (UID: "7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.314912 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-kube-api-access-cjtdf" (OuterVolumeSpecName: "kube-api-access-cjtdf") pod "7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" (UID: "7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4"). InnerVolumeSpecName "kube-api-access-cjtdf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.410746 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjtdf\" (UniqueName: \"kubernetes.io/projected/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-kube-api-access-cjtdf\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.410777 4932 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.504707 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-util" (OuterVolumeSpecName: "util") pod "7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" (UID: "7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.512052 4932 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.831373 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" event={"ID":"7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4","Type":"ContainerDied","Data":"6950381cadc31184b1db0ddca2231388bad8297b308ce04f703fe96237e91c3a"} Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.831446 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6950381cadc31184b1db0ddca2231388bad8297b308ce04f703fe96237e91c3a" Nov 25 09:01:16 crc kubenswrapper[4932]: I1125 09:01:16.831453 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772erlth7" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.617641 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-5zpcl"] Nov 25 09:01:18 crc kubenswrapper[4932]: E1125 09:01:18.618103 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" containerName="extract" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.618116 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" containerName="extract" Nov 25 09:01:18 crc kubenswrapper[4932]: E1125 09:01:18.618125 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" containerName="util" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.618131 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" containerName="util" Nov 25 09:01:18 crc kubenswrapper[4932]: E1125 09:01:18.618147 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" containerName="pull" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.618152 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" containerName="pull" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.618269 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e3d9b1d-1241-4fef-9234-2dac1c0dc4b4" containerName="extract" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.618646 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-5zpcl" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.620510 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.620770 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.624238 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-8l9k2" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.639678 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-5zpcl"] Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.732324 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwls6\" (UniqueName: \"kubernetes.io/projected/cfb535ee-0efc-4eb9-b47d-ce2623eec14c-kube-api-access-qwls6\") pod \"nmstate-operator-557fdffb88-5zpcl\" (UID: \"cfb535ee-0efc-4eb9-b47d-ce2623eec14c\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-5zpcl" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.833887 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwls6\" (UniqueName: \"kubernetes.io/projected/cfb535ee-0efc-4eb9-b47d-ce2623eec14c-kube-api-access-qwls6\") pod \"nmstate-operator-557fdffb88-5zpcl\" (UID: \"cfb535ee-0efc-4eb9-b47d-ce2623eec14c\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-5zpcl" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.861346 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwls6\" 
(UniqueName: \"kubernetes.io/projected/cfb535ee-0efc-4eb9-b47d-ce2623eec14c-kube-api-access-qwls6\") pod \"nmstate-operator-557fdffb88-5zpcl\" (UID: \"cfb535ee-0efc-4eb9-b47d-ce2623eec14c\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-5zpcl" Nov 25 09:01:18 crc kubenswrapper[4932]: I1125 09:01:18.939486 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-5zpcl" Nov 25 09:01:19 crc kubenswrapper[4932]: I1125 09:01:19.408060 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-5zpcl"] Nov 25 09:01:19 crc kubenswrapper[4932]: I1125 09:01:19.849385 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-5zpcl" event={"ID":"cfb535ee-0efc-4eb9-b47d-ce2623eec14c","Type":"ContainerStarted","Data":"eb2b0871ccb005eef384bb520f07c3683864d4e76ba71b0bda742789cbfe0a27"} Nov 25 09:01:24 crc kubenswrapper[4932]: I1125 09:01:24.879135 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-5zpcl" event={"ID":"cfb535ee-0efc-4eb9-b47d-ce2623eec14c","Type":"ContainerStarted","Data":"76d4ce39540b6a4d290c68bf394b3c909553ac88e6b3c767f6618c9fabf5b4f4"} Nov 25 09:01:24 crc kubenswrapper[4932]: I1125 09:01:24.899604 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-5zpcl" podStartSLOduration=2.582795354 podStartE2EDuration="6.899588227s" podCreationTimestamp="2025-11-25 09:01:18 +0000 UTC" firstStartedPulling="2025-11-25 09:01:19.417967192 +0000 UTC m=+739.543996755" lastFinishedPulling="2025-11-25 09:01:23.734760045 +0000 UTC m=+743.860789628" observedRunningTime="2025-11-25 09:01:24.897422145 +0000 UTC m=+745.023451708" watchObservedRunningTime="2025-11-25 09:01:24.899588227 +0000 UTC m=+745.025617810" Nov 25 09:01:25 crc kubenswrapper[4932]: I1125 09:01:25.894160 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-x95f8"] Nov 25 09:01:25 crc kubenswrapper[4932]: I1125 09:01:25.895148 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-x95f8" Nov 25 09:01:25 crc kubenswrapper[4932]: I1125 09:01:25.897415 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-skfts" Nov 25 09:01:25 crc kubenswrapper[4932]: I1125 09:01:25.901665 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt"] Nov 25 09:01:25 crc kubenswrapper[4932]: I1125 09:01:25.902651 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" Nov 25 09:01:25 crc kubenswrapper[4932]: I1125 09:01:25.908348 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 09:01:25 crc kubenswrapper[4932]: I1125 09:01:25.913330 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-x95f8"] Nov 25 09:01:25 crc kubenswrapper[4932]: I1125 09:01:25.919685 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-t65d6"] Nov 25 09:01:25 crc kubenswrapper[4932]: I1125 09:01:25.920533 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:25 crc kubenswrapper[4932]: I1125 09:01:25.928274 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt"] Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.004346 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k"] Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.004980 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.006658 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-cvwkj" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.007587 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.008808 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.020469 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k"] Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.034517 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/c2470d47-03f6-4fc9-8e22-e6a1fa17601e-ovs-socket\") pod \"nmstate-handler-t65d6\" (UID: \"c2470d47-03f6-4fc9-8e22-e6a1fa17601e\") " pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.034573 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/c2470d47-03f6-4fc9-8e22-e6a1fa17601e-nmstate-lock\") pod \"nmstate-handler-t65d6\" (UID: \"c2470d47-03f6-4fc9-8e22-e6a1fa17601e\") " pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.034610 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/c2470d47-03f6-4fc9-8e22-e6a1fa17601e-dbus-socket\") pod \"nmstate-handler-t65d6\" (UID: \"c2470d47-03f6-4fc9-8e22-e6a1fa17601e\") " pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.034643 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sps4p\" (UniqueName: \"kubernetes.io/projected/e9498712-df99-468a-9867-a547fa6352d8-kube-api-access-sps4p\") pod \"nmstate-metrics-5dcf9c57c5-x95f8\" (UID: \"e9498712-df99-468a-9867-a547fa6352d8\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-x95f8" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.034670 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pg8n\" (UniqueName: \"kubernetes.io/projected/a886f61b-daee-43ad-8c73-3510ade35dcb-kube-api-access-2pg8n\") pod \"nmstate-webhook-6b89b748d8-8mmjt\" (UID: \"a886f61b-daee-43ad-8c73-3510ade35dcb\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.034827 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/a886f61b-daee-43ad-8c73-3510ade35dcb-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-8mmjt\" (UID: \"a886f61b-daee-43ad-8c73-3510ade35dcb\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.034991 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjxbr\" (UniqueName: \"kubernetes.io/projected/c2470d47-03f6-4fc9-8e22-e6a1fa17601e-kube-api-access-cjxbr\") pod \"nmstate-handler-t65d6\" (UID: \"c2470d47-03f6-4fc9-8e22-e6a1fa17601e\") " pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.135967 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjxbr\" (UniqueName: \"kubernetes.io/projected/c2470d47-03f6-4fc9-8e22-e6a1fa17601e-kube-api-access-cjxbr\") pod \"nmstate-handler-t65d6\" (UID: \"c2470d47-03f6-4fc9-8e22-e6a1fa17601e\") " pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.136025 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/c2470d47-03f6-4fc9-8e22-e6a1fa17601e-ovs-socket\") pod \"nmstate-handler-t65d6\" (UID: \"c2470d47-03f6-4fc9-8e22-e6a1fa17601e\") " pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.136045 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/c2470d47-03f6-4fc9-8e22-e6a1fa17601e-nmstate-lock\") pod \"nmstate-handler-t65d6\" (UID: \"c2470d47-03f6-4fc9-8e22-e6a1fa17601e\") " pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.136068 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/c2470d47-03f6-4fc9-8e22-e6a1fa17601e-dbus-socket\") pod \"nmstate-handler-t65d6\" (UID: \"c2470d47-03f6-4fc9-8e22-e6a1fa17601e\") " pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.136103 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b882dfaf-6970-4fbb-8d1a-b0572641a3e5-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-9bw4k\" (UID: \"b882dfaf-6970-4fbb-8d1a-b0572641a3e5\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.136133 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-658jv\" (UniqueName: \"kubernetes.io/projected/b882dfaf-6970-4fbb-8d1a-b0572641a3e5-kube-api-access-658jv\") pod \"nmstate-console-plugin-5874bd7bc5-9bw4k\" (UID: \"b882dfaf-6970-4fbb-8d1a-b0572641a3e5\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.136136 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/c2470d47-03f6-4fc9-8e22-e6a1fa17601e-ovs-socket\") pod \"nmstate-handler-t65d6\" (UID: \"c2470d47-03f6-4fc9-8e22-e6a1fa17601e\") " pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.136150 4932 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/c2470d47-03f6-4fc9-8e22-e6a1fa17601e-nmstate-lock\") pod \"nmstate-handler-t65d6\" (UID: \"c2470d47-03f6-4fc9-8e22-e6a1fa17601e\") " pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.136158 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sps4p\" (UniqueName: \"kubernetes.io/projected/e9498712-df99-468a-9867-a547fa6352d8-kube-api-access-sps4p\") pod \"nmstate-metrics-5dcf9c57c5-x95f8\" (UID: \"e9498712-df99-468a-9867-a547fa6352d8\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-x95f8" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.136260 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pg8n\" (UniqueName: \"kubernetes.io/projected/a886f61b-daee-43ad-8c73-3510ade35dcb-kube-api-access-2pg8n\") pod \"nmstate-webhook-6b89b748d8-8mmjt\" (UID: \"a886f61b-daee-43ad-8c73-3510ade35dcb\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.136294 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b882dfaf-6970-4fbb-8d1a-b0572641a3e5-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-9bw4k\" (UID: \"b882dfaf-6970-4fbb-8d1a-b0572641a3e5\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.136328 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/a886f61b-daee-43ad-8c73-3510ade35dcb-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-8mmjt\" (UID: \"a886f61b-daee-43ad-8c73-3510ade35dcb\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" Nov 25 09:01:26 crc kubenswrapper[4932]: E1125 09:01:26.136438 4932 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 25 09:01:26 crc kubenswrapper[4932]: E1125 09:01:26.136482 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a886f61b-daee-43ad-8c73-3510ade35dcb-tls-key-pair podName:a886f61b-daee-43ad-8c73-3510ade35dcb nodeName:}" failed. No retries permitted until 2025-11-25 09:01:26.636465544 +0000 UTC m=+746.762495107 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/a886f61b-daee-43ad-8c73-3510ade35dcb-tls-key-pair") pod "nmstate-webhook-6b89b748d8-8mmjt" (UID: "a886f61b-daee-43ad-8c73-3510ade35dcb") : secret "openshift-nmstate-webhook" not found Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.136440 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/c2470d47-03f6-4fc9-8e22-e6a1fa17601e-dbus-socket\") pod \"nmstate-handler-t65d6\" (UID: \"c2470d47-03f6-4fc9-8e22-e6a1fa17601e\") " pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.162514 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sps4p\" (UniqueName: \"kubernetes.io/projected/e9498712-df99-468a-9867-a547fa6352d8-kube-api-access-sps4p\") pod \"nmstate-metrics-5dcf9c57c5-x95f8\" (UID: \"e9498712-df99-468a-9867-a547fa6352d8\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-x95f8" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.164022 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pg8n\" (UniqueName: \"kubernetes.io/projected/a886f61b-daee-43ad-8c73-3510ade35dcb-kube-api-access-2pg8n\") pod \"nmstate-webhook-6b89b748d8-8mmjt\" (UID: \"a886f61b-daee-43ad-8c73-3510ade35dcb\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.165339 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjxbr\" (UniqueName: \"kubernetes.io/projected/c2470d47-03f6-4fc9-8e22-e6a1fa17601e-kube-api-access-cjxbr\") pod \"nmstate-handler-t65d6\" (UID: \"c2470d47-03f6-4fc9-8e22-e6a1fa17601e\") " pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.212850 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-x95f8" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.234584 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.237215 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b882dfaf-6970-4fbb-8d1a-b0572641a3e5-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-9bw4k\" (UID: \"b882dfaf-6970-4fbb-8d1a-b0572641a3e5\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.237266 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-658jv\" (UniqueName: \"kubernetes.io/projected/b882dfaf-6970-4fbb-8d1a-b0572641a3e5-kube-api-access-658jv\") pod \"nmstate-console-plugin-5874bd7bc5-9bw4k\" (UID: \"b882dfaf-6970-4fbb-8d1a-b0572641a3e5\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.237310 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b882dfaf-6970-4fbb-8d1a-b0572641a3e5-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-9bw4k\" (UID: \"b882dfaf-6970-4fbb-8d1a-b0572641a3e5\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.238395 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b882dfaf-6970-4fbb-8d1a-b0572641a3e5-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-9bw4k\" (UID: \"b882dfaf-6970-4fbb-8d1a-b0572641a3e5\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.252282 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b882dfaf-6970-4fbb-8d1a-b0572641a3e5-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-9bw4k\" (UID: \"b882dfaf-6970-4fbb-8d1a-b0572641a3e5\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.258051 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-658jv\" (UniqueName: \"kubernetes.io/projected/b882dfaf-6970-4fbb-8d1a-b0572641a3e5-kube-api-access-658jv\") pod \"nmstate-console-plugin-5874bd7bc5-9bw4k\" (UID: \"b882dfaf-6970-4fbb-8d1a-b0572641a3e5\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.270382 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-d7574db4-xzfmg"] Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.271225 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.280138 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-d7574db4-xzfmg"] Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.317969 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.439825 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-service-ca\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.440214 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-console-oauth-config\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.440247 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-trusted-ca-bundle\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.440268 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-console-serving-cert\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.440400 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-console-config\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.440546 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-oauth-serving-cert\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.440661 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rncmq\" (UniqueName: \"kubernetes.io/projected/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-kube-api-access-rncmq\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.501353 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k"] Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.541922 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-oauth-serving-cert\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc 
kubenswrapper[4932]: I1125 09:01:26.541998 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rncmq\" (UniqueName: \"kubernetes.io/projected/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-kube-api-access-rncmq\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.542032 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-service-ca\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.542070 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-console-oauth-config\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.542093 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-trusted-ca-bundle\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.542112 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-console-serving-cert\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.542144 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-console-config\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.543036 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-oauth-serving-cert\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.543070 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-service-ca\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.543087 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-console-config\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.544135 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-trusted-ca-bundle\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.547584 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-console-oauth-config\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.547586 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-console-serving-cert\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.558321 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rncmq\" (UniqueName: \"kubernetes.io/projected/0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1-kube-api-access-rncmq\") pod \"console-d7574db4-xzfmg\" (UID: \"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1\") " pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.612554 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.643111 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/a886f61b-daee-43ad-8c73-3510ade35dcb-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-8mmjt\" (UID: \"a886f61b-daee-43ad-8c73-3510ade35dcb\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.646541 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/a886f61b-daee-43ad-8c73-3510ade35dcb-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-8mmjt\" (UID: \"a886f61b-daee-43ad-8c73-3510ade35dcb\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.660921 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-x95f8"] Nov 25 09:01:26 crc kubenswrapper[4932]: W1125 09:01:26.665308 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode9498712_df99_468a_9867_a547fa6352d8.slice/crio-6bb947d3c2974aace5426f4d75cab876ac0bb7375ceca7a70bfbeddf8366abf3 WatchSource:0}: Error finding container 6bb947d3c2974aace5426f4d75cab876ac0bb7375ceca7a70bfbeddf8366abf3: Status 404 returned error can't find the container with id 6bb947d3c2974aace5426f4d75cab876ac0bb7375ceca7a70bfbeddf8366abf3 Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.821553 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.903481 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-x95f8" event={"ID":"e9498712-df99-468a-9867-a547fa6352d8","Type":"ContainerStarted","Data":"6bb947d3c2974aace5426f4d75cab876ac0bb7375ceca7a70bfbeddf8366abf3"} Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.904577 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" event={"ID":"b882dfaf-6970-4fbb-8d1a-b0572641a3e5","Type":"ContainerStarted","Data":"8e33c9bd8ed9fefded11d7ee99ccc4d33a7b75906369e36da7dfcfdfae8fcbdc"} Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.905224 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-t65d6" event={"ID":"c2470d47-03f6-4fc9-8e22-e6a1fa17601e","Type":"ContainerStarted","Data":"cdd8ac5c5117e07f674ab6268be49c876d8d0f57bf013cdefa634dca6a31a78d"} Nov 25 09:01:26 crc kubenswrapper[4932]: I1125 09:01:26.995890 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt"] Nov 25 09:01:27 crc kubenswrapper[4932]: W1125 09:01:27.006847 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda886f61b_daee_43ad_8c73_3510ade35dcb.slice/crio-1d4019fbc907697ce93c24f05f0b597ada461c4449a5a6c4f485ac9203f09603 WatchSource:0}: Error finding container 1d4019fbc907697ce93c24f05f0b597ada461c4449a5a6c4f485ac9203f09603: Status 404 returned error can't find the container with id 1d4019fbc907697ce93c24f05f0b597ada461c4449a5a6c4f485ac9203f09603 Nov 25 09:01:27 crc kubenswrapper[4932]: I1125 09:01:27.007623 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-d7574db4-xzfmg"] Nov 25 09:01:27 crc kubenswrapper[4932]: W1125 09:01:27.089784 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a926ba7_f6c7_47a9_ba5b_0b9aba7611c1.slice/crio-6d3241422d9b49be0e314898fdc800678cbf93b338d805cd246b5f4e501da5d0 WatchSource:0}: Error finding container 6d3241422d9b49be0e314898fdc800678cbf93b338d805cd246b5f4e501da5d0: Status 404 returned error can't find the container with id 6d3241422d9b49be0e314898fdc800678cbf93b338d805cd246b5f4e501da5d0 Nov 25 09:01:27 crc kubenswrapper[4932]: I1125 09:01:27.911630 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" event={"ID":"a886f61b-daee-43ad-8c73-3510ade35dcb","Type":"ContainerStarted","Data":"1d4019fbc907697ce93c24f05f0b597ada461c4449a5a6c4f485ac9203f09603"} Nov 25 09:01:27 crc kubenswrapper[4932]: I1125 09:01:27.913129 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-d7574db4-xzfmg" event={"ID":"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1","Type":"ContainerStarted","Data":"cd7b199accc450901a89286a88947cc283ec8447d47bc3efd90a66213eac2f23"} Nov 25 09:01:27 crc kubenswrapper[4932]: I1125 09:01:27.913177 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-d7574db4-xzfmg" event={"ID":"0a926ba7-f6c7-47a9-ba5b-0b9aba7611c1","Type":"ContainerStarted","Data":"6d3241422d9b49be0e314898fdc800678cbf93b338d805cd246b5f4e501da5d0"} Nov 25 09:01:27 crc kubenswrapper[4932]: I1125 09:01:27.935583 4932 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openshift-console/console-d7574db4-xzfmg" podStartSLOduration=1.935565653 podStartE2EDuration="1.935565653s" podCreationTimestamp="2025-11-25 09:01:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:01:27.933559446 +0000 UTC m=+748.059589019" watchObservedRunningTime="2025-11-25 09:01:27.935565653 +0000 UTC m=+748.061595206" Nov 25 09:01:29 crc kubenswrapper[4932]: I1125 09:01:29.925100 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" event={"ID":"a886f61b-daee-43ad-8c73-3510ade35dcb","Type":"ContainerStarted","Data":"0827299277e595d232700469160915d1d30d8575e2afe535afca3e1af2dd77e7"} Nov 25 09:01:29 crc kubenswrapper[4932]: I1125 09:01:29.925617 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" Nov 25 09:01:29 crc kubenswrapper[4932]: I1125 09:01:29.927248 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" event={"ID":"b882dfaf-6970-4fbb-8d1a-b0572641a3e5","Type":"ContainerStarted","Data":"05b6e9fceb12fdb19cdeaffb17cc4831e1a3f04b71a34f88d3796b01ed991813"} Nov 25 09:01:29 crc kubenswrapper[4932]: I1125 09:01:29.930339 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-x95f8" event={"ID":"e9498712-df99-468a-9867-a547fa6352d8","Type":"ContainerStarted","Data":"3c6ebc631eb2db520434c1363f348f2a01cf581b59455b7e4105dd11fde9c1d9"} Nov 25 09:01:29 crc kubenswrapper[4932]: I1125 09:01:29.978358 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" podStartSLOduration=2.308884544 podStartE2EDuration="4.978339306s" podCreationTimestamp="2025-11-25 09:01:25 +0000 UTC" firstStartedPulling="2025-11-25 09:01:27.009128673 +0000 UTC m=+747.135158236" lastFinishedPulling="2025-11-25 09:01:29.678583435 +0000 UTC m=+749.804612998" observedRunningTime="2025-11-25 09:01:29.959388043 +0000 UTC m=+750.085417606" watchObservedRunningTime="2025-11-25 09:01:29.978339306 +0000 UTC m=+750.104368869" Nov 25 09:01:30 crc kubenswrapper[4932]: I1125 09:01:30.627760 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-9bw4k" podStartSLOduration=2.4766112590000002 podStartE2EDuration="5.627744447s" podCreationTimestamp="2025-11-25 09:01:25 +0000 UTC" firstStartedPulling="2025-11-25 09:01:26.505979083 +0000 UTC m=+746.632008646" lastFinishedPulling="2025-11-25 09:01:29.657112271 +0000 UTC m=+749.783141834" observedRunningTime="2025-11-25 09:01:29.980896359 +0000 UTC m=+750.106925912" watchObservedRunningTime="2025-11-25 09:01:30.627744447 +0000 UTC m=+750.753774010" Nov 25 09:01:30 crc kubenswrapper[4932]: I1125 09:01:30.843431 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ctx5h"] Nov 25 09:01:30 crc kubenswrapper[4932]: I1125 09:01:30.843948 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" podUID="66f325cc-3180-4c77-afdc-7a642717d31f" containerName="controller-manager" containerID="cri-o://c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681" gracePeriod=30 Nov 25 09:01:30 crc kubenswrapper[4932]: I1125 
09:01:30.937228 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-t65d6" event={"ID":"c2470d47-03f6-4fc9-8e22-e6a1fa17601e","Type":"ContainerStarted","Data":"2f319a9b1ce3afdf88e3acedaaedcc5ef19abeacafa143915225abaacd67f3e1"} Nov 25 09:01:30 crc kubenswrapper[4932]: I1125 09:01:30.955175 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-t65d6" podStartSLOduration=2.561464731 podStartE2EDuration="5.95514539s" podCreationTimestamp="2025-11-25 09:01:25 +0000 UTC" firstStartedPulling="2025-11-25 09:01:26.263430022 +0000 UTC m=+746.389459585" lastFinishedPulling="2025-11-25 09:01:29.657110681 +0000 UTC m=+749.783140244" observedRunningTime="2025-11-25 09:01:30.951263679 +0000 UTC m=+751.077293232" watchObservedRunningTime="2025-11-25 09:01:30.95514539 +0000 UTC m=+751.081174973" Nov 25 09:01:30 crc kubenswrapper[4932]: I1125 09:01:30.972666 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp"] Nov 25 09:01:30 crc kubenswrapper[4932]: I1125 09:01:30.972892 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" podUID="f7f76ccd-1388-46e1-b71c-0b4352d86eaf" containerName="route-controller-manager" containerID="cri-o://1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c" gracePeriod=30 Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.234970 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.242632 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.346355 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.414613 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-client-ca\") pod \"66f325cc-3180-4c77-afdc-7a642717d31f\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.414671 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66f325cc-3180-4c77-afdc-7a642717d31f-serving-cert\") pod \"66f325cc-3180-4c77-afdc-7a642717d31f\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.414692 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twnjh\" (UniqueName: \"kubernetes.io/projected/66f325cc-3180-4c77-afdc-7a642717d31f-kube-api-access-twnjh\") pod \"66f325cc-3180-4c77-afdc-7a642717d31f\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.414725 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-proxy-ca-bundles\") pod \"66f325cc-3180-4c77-afdc-7a642717d31f\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.414754 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-config\") pod \"66f325cc-3180-4c77-afdc-7a642717d31f\" (UID: \"66f325cc-3180-4c77-afdc-7a642717d31f\") " Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.415818 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-client-ca" (OuterVolumeSpecName: "client-ca") pod "66f325cc-3180-4c77-afdc-7a642717d31f" (UID: "66f325cc-3180-4c77-afdc-7a642717d31f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.415884 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-config" (OuterVolumeSpecName: "config") pod "66f325cc-3180-4c77-afdc-7a642717d31f" (UID: "66f325cc-3180-4c77-afdc-7a642717d31f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.416326 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "66f325cc-3180-4c77-afdc-7a642717d31f" (UID: "66f325cc-3180-4c77-afdc-7a642717d31f"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.421227 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66f325cc-3180-4c77-afdc-7a642717d31f-kube-api-access-twnjh" (OuterVolumeSpecName: "kube-api-access-twnjh") pod "66f325cc-3180-4c77-afdc-7a642717d31f" (UID: "66f325cc-3180-4c77-afdc-7a642717d31f"). 
InnerVolumeSpecName "kube-api-access-twnjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.421253 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66f325cc-3180-4c77-afdc-7a642717d31f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "66f325cc-3180-4c77-afdc-7a642717d31f" (UID: "66f325cc-3180-4c77-afdc-7a642717d31f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.515700 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-config\") pod \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.515810 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-serving-cert\") pod \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.515833 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-client-ca\") pod \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.515982 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9rfz\" (UniqueName: \"kubernetes.io/projected/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-kube-api-access-r9rfz\") pod \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\" (UID: \"f7f76ccd-1388-46e1-b71c-0b4352d86eaf\") " Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.516253 4932 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.516265 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66f325cc-3180-4c77-afdc-7a642717d31f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.516275 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twnjh\" (UniqueName: \"kubernetes.io/projected/66f325cc-3180-4c77-afdc-7a642717d31f-kube-api-access-twnjh\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.516284 4932 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.516292 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66f325cc-3180-4c77-afdc-7a642717d31f-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.516809 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-client-ca" (OuterVolumeSpecName: "client-ca") pod "f7f76ccd-1388-46e1-b71c-0b4352d86eaf" (UID: 
"f7f76ccd-1388-46e1-b71c-0b4352d86eaf"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.516820 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-config" (OuterVolumeSpecName: "config") pod "f7f76ccd-1388-46e1-b71c-0b4352d86eaf" (UID: "f7f76ccd-1388-46e1-b71c-0b4352d86eaf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.519390 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f7f76ccd-1388-46e1-b71c-0b4352d86eaf" (UID: "f7f76ccd-1388-46e1-b71c-0b4352d86eaf"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.519627 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-kube-api-access-r9rfz" (OuterVolumeSpecName: "kube-api-access-r9rfz") pod "f7f76ccd-1388-46e1-b71c-0b4352d86eaf" (UID: "f7f76ccd-1388-46e1-b71c-0b4352d86eaf"). InnerVolumeSpecName "kube-api-access-r9rfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.617732 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.617778 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.617812 4932 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.617828 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9rfz\" (UniqueName: \"kubernetes.io/projected/f7f76ccd-1388-46e1-b71c-0b4352d86eaf-kube-api-access-r9rfz\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.950711 4932 generic.go:334] "Generic (PLEG): container finished" podID="f7f76ccd-1388-46e1-b71c-0b4352d86eaf" containerID="1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c" exitCode=0 Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.950781 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" event={"ID":"f7f76ccd-1388-46e1-b71c-0b4352d86eaf","Type":"ContainerDied","Data":"1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c"} Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.950822 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.950840 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp" event={"ID":"f7f76ccd-1388-46e1-b71c-0b4352d86eaf","Type":"ContainerDied","Data":"74c65febdda5505ab33581ad1aa2f4b6ce7c459d2b50c6ef87ccb9bb1423145c"} Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.950863 4932 scope.go:117] "RemoveContainer" containerID="1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.955548 4932 generic.go:334] "Generic (PLEG): container finished" podID="66f325cc-3180-4c77-afdc-7a642717d31f" containerID="c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681" exitCode=0 Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.955614 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.955600 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" event={"ID":"66f325cc-3180-4c77-afdc-7a642717d31f","Type":"ContainerDied","Data":"c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681"} Nov 25 09:01:31 crc kubenswrapper[4932]: I1125 09:01:31.956021 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-ctx5h" event={"ID":"66f325cc-3180-4c77-afdc-7a642717d31f","Type":"ContainerDied","Data":"4b16452645b1969534d63060a7655e5bad74a9ca36987dbd7b27c4f4ba1bca4f"} Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.010822 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp"] Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.016778 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-v2fzp"] Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.020431 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ctx5h"] Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.025447 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-ctx5h"] Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.340564 4932 scope.go:117] "RemoveContainer" containerID="1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c" Nov 25 09:01:32 crc kubenswrapper[4932]: E1125 09:01:32.341151 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c\": container with ID starting with 1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c not found: ID does not exist" containerID="1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.341224 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c"} err="failed to get container status \"1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c\": rpc 
error: code = NotFound desc = could not find container \"1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c\": container with ID starting with 1c43cf46c6eb3931e82a29e7f34fce9f8cdcb4442a812df8adc7b5bac383455c not found: ID does not exist" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.341259 4932 scope.go:117] "RemoveContainer" containerID="c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.353755 4932 scope.go:117] "RemoveContainer" containerID="c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681" Nov 25 09:01:32 crc kubenswrapper[4932]: E1125 09:01:32.354094 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681\": container with ID starting with c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681 not found: ID does not exist" containerID="c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.354129 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681"} err="failed to get container status \"c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681\": rpc error: code = NotFound desc = could not find container \"c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681\": container with ID starting with c683e99ed341eb5f5f25add37f43a3988b62a7731f9b014028d62f391309c681 not found: ID does not exist" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.560235 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr"] Nov 25 09:01:32 crc kubenswrapper[4932]: E1125 09:01:32.561005 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66f325cc-3180-4c77-afdc-7a642717d31f" containerName="controller-manager" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.561028 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="66f325cc-3180-4c77-afdc-7a642717d31f" containerName="controller-manager" Nov 25 09:01:32 crc kubenswrapper[4932]: E1125 09:01:32.561041 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7f76ccd-1388-46e1-b71c-0b4352d86eaf" containerName="route-controller-manager" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.561051 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7f76ccd-1388-46e1-b71c-0b4352d86eaf" containerName="route-controller-manager" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.561201 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="66f325cc-3180-4c77-afdc-7a642717d31f" containerName="controller-manager" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.561225 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7f76ccd-1388-46e1-b71c-0b4352d86eaf" containerName="route-controller-manager" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.561685 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.564039 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.564219 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.564471 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.564543 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.564615 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.565153 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.567859 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-844894c8cf-d6kc2"] Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.568787 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.570907 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.572140 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.572382 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr"] Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.572613 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.572791 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.572866 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.572936 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.579715 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-844894c8cf-d6kc2"] Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.611391 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.633090 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66f325cc-3180-4c77-afdc-7a642717d31f" 
path="/var/lib/kubelet/pods/66f325cc-3180-4c77-afdc-7a642717d31f/volumes" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.633837 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7f76ccd-1388-46e1-b71c-0b4352d86eaf" path="/var/lib/kubelet/pods/f7f76ccd-1388-46e1-b71c-0b4352d86eaf/volumes" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.671530 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr"] Nov 25 09:01:32 crc kubenswrapper[4932]: E1125 09:01:32.671941 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[client-ca config kube-api-access-d47dt serving-cert], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" podUID="08b5d41f-a902-493a-833f-974de84aa121" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.734606 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08b5d41f-a902-493a-833f-974de84aa121-client-ca\") pod \"route-controller-manager-769d4cdb9d-tsqjr\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.734665 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f2tz\" (UniqueName: \"kubernetes.io/projected/121af39f-61df-4b13-bf2d-55a38ddc7fd3-kube-api-access-2f2tz\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.734722 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08b5d41f-a902-493a-833f-974de84aa121-serving-cert\") pod \"route-controller-manager-769d4cdb9d-tsqjr\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.734871 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d47dt\" (UniqueName: \"kubernetes.io/projected/08b5d41f-a902-493a-833f-974de84aa121-kube-api-access-d47dt\") pod \"route-controller-manager-769d4cdb9d-tsqjr\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.734980 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/121af39f-61df-4b13-bf2d-55a38ddc7fd3-client-ca\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.735019 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/121af39f-61df-4b13-bf2d-55a38ddc7fd3-config\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " 
pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.735042 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08b5d41f-a902-493a-833f-974de84aa121-config\") pod \"route-controller-manager-769d4cdb9d-tsqjr\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.735203 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/121af39f-61df-4b13-bf2d-55a38ddc7fd3-serving-cert\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.735243 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/121af39f-61df-4b13-bf2d-55a38ddc7fd3-proxy-ca-bundles\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.836225 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08b5d41f-a902-493a-833f-974de84aa121-serving-cert\") pod \"route-controller-manager-769d4cdb9d-tsqjr\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.836294 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d47dt\" (UniqueName: \"kubernetes.io/projected/08b5d41f-a902-493a-833f-974de84aa121-kube-api-access-d47dt\") pod \"route-controller-manager-769d4cdb9d-tsqjr\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.836324 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/121af39f-61df-4b13-bf2d-55a38ddc7fd3-client-ca\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.836380 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/121af39f-61df-4b13-bf2d-55a38ddc7fd3-config\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.836399 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08b5d41f-a902-493a-833f-974de84aa121-config\") pod \"route-controller-manager-769d4cdb9d-tsqjr\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc 
kubenswrapper[4932]: I1125 09:01:32.836440 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/121af39f-61df-4b13-bf2d-55a38ddc7fd3-serving-cert\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.836461 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/121af39f-61df-4b13-bf2d-55a38ddc7fd3-proxy-ca-bundles\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.836504 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08b5d41f-a902-493a-833f-974de84aa121-client-ca\") pod \"route-controller-manager-769d4cdb9d-tsqjr\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.836526 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f2tz\" (UniqueName: \"kubernetes.io/projected/121af39f-61df-4b13-bf2d-55a38ddc7fd3-kube-api-access-2f2tz\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.837521 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08b5d41f-a902-493a-833f-974de84aa121-client-ca\") pod \"route-controller-manager-769d4cdb9d-tsqjr\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.837676 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08b5d41f-a902-493a-833f-974de84aa121-config\") pod \"route-controller-manager-769d4cdb9d-tsqjr\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.837691 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/121af39f-61df-4b13-bf2d-55a38ddc7fd3-client-ca\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.837769 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/121af39f-61df-4b13-bf2d-55a38ddc7fd3-config\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.838685 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/121af39f-61df-4b13-bf2d-55a38ddc7fd3-proxy-ca-bundles\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.841716 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08b5d41f-a902-493a-833f-974de84aa121-serving-cert\") pod \"route-controller-manager-769d4cdb9d-tsqjr\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.841788 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/121af39f-61df-4b13-bf2d-55a38ddc7fd3-serving-cert\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.856229 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f2tz\" (UniqueName: \"kubernetes.io/projected/121af39f-61df-4b13-bf2d-55a38ddc7fd3-kube-api-access-2f2tz\") pod \"controller-manager-844894c8cf-d6kc2\" (UID: \"121af39f-61df-4b13-bf2d-55a38ddc7fd3\") " pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.861543 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d47dt\" (UniqueName: \"kubernetes.io/projected/08b5d41f-a902-493a-833f-974de84aa121-kube-api-access-d47dt\") pod \"route-controller-manager-769d4cdb9d-tsqjr\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.906715 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.963595 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-x95f8" event={"ID":"e9498712-df99-468a-9867-a547fa6352d8","Type":"ContainerStarted","Data":"3606fd21efc0d447e727c66b15330288259c65ac99d34339c7618920d1a38195"} Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.966397 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.982252 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-x95f8" podStartSLOduration=2.199032385 podStartE2EDuration="7.982231784s" podCreationTimestamp="2025-11-25 09:01:25 +0000 UTC" firstStartedPulling="2025-11-25 09:01:26.669979903 +0000 UTC m=+746.796009466" lastFinishedPulling="2025-11-25 09:01:32.453179302 +0000 UTC m=+752.579208865" observedRunningTime="2025-11-25 09:01:32.982140671 +0000 UTC m=+753.108170254" watchObservedRunningTime="2025-11-25 09:01:32.982231784 +0000 UTC m=+753.108261347" Nov 25 09:01:32 crc kubenswrapper[4932]: I1125 09:01:32.994791 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.139655 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08b5d41f-a902-493a-833f-974de84aa121-client-ca\") pod \"08b5d41f-a902-493a-833f-974de84aa121\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.140218 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08b5d41f-a902-493a-833f-974de84aa121-client-ca" (OuterVolumeSpecName: "client-ca") pod "08b5d41f-a902-493a-833f-974de84aa121" (UID: "08b5d41f-a902-493a-833f-974de84aa121"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.140345 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08b5d41f-a902-493a-833f-974de84aa121-config\") pod \"08b5d41f-a902-493a-833f-974de84aa121\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.140852 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08b5d41f-a902-493a-833f-974de84aa121-config" (OuterVolumeSpecName: "config") pod "08b5d41f-a902-493a-833f-974de84aa121" (UID: "08b5d41f-a902-493a-833f-974de84aa121"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.140935 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08b5d41f-a902-493a-833f-974de84aa121-serving-cert\") pod \"08b5d41f-a902-493a-833f-974de84aa121\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.141554 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d47dt\" (UniqueName: \"kubernetes.io/projected/08b5d41f-a902-493a-833f-974de84aa121-kube-api-access-d47dt\") pod \"08b5d41f-a902-493a-833f-974de84aa121\" (UID: \"08b5d41f-a902-493a-833f-974de84aa121\") " Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.142368 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08b5d41f-a902-493a-833f-974de84aa121-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.142391 4932 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/08b5d41f-a902-493a-833f-974de84aa121-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.143930 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08b5d41f-a902-493a-833f-974de84aa121-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "08b5d41f-a902-493a-833f-974de84aa121" (UID: "08b5d41f-a902-493a-833f-974de84aa121"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.145401 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08b5d41f-a902-493a-833f-974de84aa121-kube-api-access-d47dt" (OuterVolumeSpecName: "kube-api-access-d47dt") pod "08b5d41f-a902-493a-833f-974de84aa121" (UID: "08b5d41f-a902-493a-833f-974de84aa121"). InnerVolumeSpecName "kube-api-access-d47dt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.243846 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d47dt\" (UniqueName: \"kubernetes.io/projected/08b5d41f-a902-493a-833f-974de84aa121-kube-api-access-d47dt\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.243904 4932 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/08b5d41f-a902-493a-833f-974de84aa121-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.353523 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-844894c8cf-d6kc2"] Nov 25 09:01:33 crc kubenswrapper[4932]: W1125 09:01:33.357944 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod121af39f_61df_4b13_bf2d_55a38ddc7fd3.slice/crio-a07e294eeb5603606de73726d7d3c0f0271c26c317199f05193f881322bc9be8 WatchSource:0}: Error finding container a07e294eeb5603606de73726d7d3c0f0271c26c317199f05193f881322bc9be8: Status 404 returned error can't find the container with id a07e294eeb5603606de73726d7d3c0f0271c26c317199f05193f881322bc9be8 Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.976698 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" event={"ID":"121af39f-61df-4b13-bf2d-55a38ddc7fd3","Type":"ContainerStarted","Data":"574ce3041a4b19180aeb09900cb7c9b13923904e7860b34b1a28f5913104604e"} Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.977086 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.977102 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" event={"ID":"121af39f-61df-4b13-bf2d-55a38ddc7fd3","Type":"ContainerStarted","Data":"a07e294eeb5603606de73726d7d3c0f0271c26c317199f05193f881322bc9be8"} Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.976735 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr" Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.981269 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" Nov 25 09:01:33 crc kubenswrapper[4932]: I1125 09:01:33.993355 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-844894c8cf-d6kc2" podStartSLOduration=2.993336549 podStartE2EDuration="2.993336549s" podCreationTimestamp="2025-11-25 09:01:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:01:33.992586398 +0000 UTC m=+754.118615961" watchObservedRunningTime="2025-11-25 09:01:33.993336549 +0000 UTC m=+754.119366112" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.045573 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb"] Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.046454 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.050276 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.050679 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.051644 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr"] Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.051690 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.051863 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.057068 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.057157 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.064059 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-769d4cdb9d-tsqjr"] Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.068557 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb"] Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.156285 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853174f8-4636-43a6-b506-c71cf16f2027-config\") pod \"route-controller-manager-84cc7c4c97-7dztb\" (UID: \"853174f8-4636-43a6-b506-c71cf16f2027\") " pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc 
kubenswrapper[4932]: I1125 09:01:34.156333 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/853174f8-4636-43a6-b506-c71cf16f2027-serving-cert\") pod \"route-controller-manager-84cc7c4c97-7dztb\" (UID: \"853174f8-4636-43a6-b506-c71cf16f2027\") " pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.156423 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qvrv\" (UniqueName: \"kubernetes.io/projected/853174f8-4636-43a6-b506-c71cf16f2027-kube-api-access-4qvrv\") pod \"route-controller-manager-84cc7c4c97-7dztb\" (UID: \"853174f8-4636-43a6-b506-c71cf16f2027\") " pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.156446 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/853174f8-4636-43a6-b506-c71cf16f2027-client-ca\") pod \"route-controller-manager-84cc7c4c97-7dztb\" (UID: \"853174f8-4636-43a6-b506-c71cf16f2027\") " pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.258035 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qvrv\" (UniqueName: \"kubernetes.io/projected/853174f8-4636-43a6-b506-c71cf16f2027-kube-api-access-4qvrv\") pod \"route-controller-manager-84cc7c4c97-7dztb\" (UID: \"853174f8-4636-43a6-b506-c71cf16f2027\") " pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.258090 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/853174f8-4636-43a6-b506-c71cf16f2027-client-ca\") pod \"route-controller-manager-84cc7c4c97-7dztb\" (UID: \"853174f8-4636-43a6-b506-c71cf16f2027\") " pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.258173 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853174f8-4636-43a6-b506-c71cf16f2027-config\") pod \"route-controller-manager-84cc7c4c97-7dztb\" (UID: \"853174f8-4636-43a6-b506-c71cf16f2027\") " pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.258213 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/853174f8-4636-43a6-b506-c71cf16f2027-serving-cert\") pod \"route-controller-manager-84cc7c4c97-7dztb\" (UID: \"853174f8-4636-43a6-b506-c71cf16f2027\") " pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.259818 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/853174f8-4636-43a6-b506-c71cf16f2027-config\") pod \"route-controller-manager-84cc7c4c97-7dztb\" (UID: \"853174f8-4636-43a6-b506-c71cf16f2027\") " pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: 
I1125 09:01:34.260073 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/853174f8-4636-43a6-b506-c71cf16f2027-client-ca\") pod \"route-controller-manager-84cc7c4c97-7dztb\" (UID: \"853174f8-4636-43a6-b506-c71cf16f2027\") " pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.263408 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/853174f8-4636-43a6-b506-c71cf16f2027-serving-cert\") pod \"route-controller-manager-84cc7c4c97-7dztb\" (UID: \"853174f8-4636-43a6-b506-c71cf16f2027\") " pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.276711 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qvrv\" (UniqueName: \"kubernetes.io/projected/853174f8-4636-43a6-b506-c71cf16f2027-kube-api-access-4qvrv\") pod \"route-controller-manager-84cc7c4c97-7dztb\" (UID: \"853174f8-4636-43a6-b506-c71cf16f2027\") " pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.372992 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.611993 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08b5d41f-a902-493a-833f-974de84aa121" path="/var/lib/kubelet/pods/08b5d41f-a902-493a-833f-974de84aa121/volumes" Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.772170 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb"] Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.987750 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" event={"ID":"853174f8-4636-43a6-b506-c71cf16f2027","Type":"ContainerStarted","Data":"6ed8da1fb30240be034308b68a770b2d8d0282d35c95c0c80a418c239f9dd90b"} Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.987804 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" event={"ID":"853174f8-4636-43a6-b506-c71cf16f2027","Type":"ContainerStarted","Data":"b93460738c2b02c4d948fe68629b07a37562aa9a483d71189038e60781b8eb8d"} Nov 25 09:01:34 crc kubenswrapper[4932]: I1125 09:01:34.988091 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:35 crc kubenswrapper[4932]: I1125 09:01:35.005515 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" podStartSLOduration=3.005499527 podStartE2EDuration="3.005499527s" podCreationTimestamp="2025-11-25 09:01:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:01:35.004014944 +0000 UTC m=+755.130044517" watchObservedRunningTime="2025-11-25 09:01:35.005499527 +0000 UTC m=+755.131529080" Nov 25 09:01:35 crc kubenswrapper[4932]: I1125 09:01:35.200321 4932 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-84cc7c4c97-7dztb" Nov 25 09:01:36 crc kubenswrapper[4932]: I1125 09:01:36.255638 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-t65d6" Nov 25 09:01:36 crc kubenswrapper[4932]: I1125 09:01:36.612601 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:36 crc kubenswrapper[4932]: I1125 09:01:36.612818 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:36 crc kubenswrapper[4932]: I1125 09:01:36.619061 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:37 crc kubenswrapper[4932]: I1125 09:01:37.001384 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-d7574db4-xzfmg" Nov 25 09:01:37 crc kubenswrapper[4932]: I1125 09:01:37.055653 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5kf8q"] Nov 25 09:01:37 crc kubenswrapper[4932]: I1125 09:01:37.181835 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:01:37 crc kubenswrapper[4932]: I1125 09:01:37.181911 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:01:40 crc kubenswrapper[4932]: I1125 09:01:40.387317 4932 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 09:01:46 crc kubenswrapper[4932]: I1125 09:01:46.831024 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-8mmjt" Nov 25 09:01:56 crc kubenswrapper[4932]: I1125 09:01:56.732041 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zhd9g"] Nov 25 09:01:56 crc kubenswrapper[4932]: I1125 09:01:56.741390 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:01:56 crc kubenswrapper[4932]: I1125 09:01:56.741421 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zhd9g"] Nov 25 09:01:56 crc kubenswrapper[4932]: I1125 09:01:56.878789 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d40e5e-6214-433a-b5fe-14f767487c17-catalog-content\") pod \"community-operators-zhd9g\" (UID: \"45d40e5e-6214-433a-b5fe-14f767487c17\") " pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:01:56 crc kubenswrapper[4932]: I1125 09:01:56.878907 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d40e5e-6214-433a-b5fe-14f767487c17-utilities\") pod \"community-operators-zhd9g\" (UID: \"45d40e5e-6214-433a-b5fe-14f767487c17\") " pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:01:56 crc kubenswrapper[4932]: I1125 09:01:56.878967 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2qxz\" (UniqueName: \"kubernetes.io/projected/45d40e5e-6214-433a-b5fe-14f767487c17-kube-api-access-w2qxz\") pod \"community-operators-zhd9g\" (UID: \"45d40e5e-6214-433a-b5fe-14f767487c17\") " pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:01:56 crc kubenswrapper[4932]: I1125 09:01:56.980321 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d40e5e-6214-433a-b5fe-14f767487c17-utilities\") pod \"community-operators-zhd9g\" (UID: \"45d40e5e-6214-433a-b5fe-14f767487c17\") " pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:01:56 crc kubenswrapper[4932]: I1125 09:01:56.980428 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2qxz\" (UniqueName: \"kubernetes.io/projected/45d40e5e-6214-433a-b5fe-14f767487c17-kube-api-access-w2qxz\") pod \"community-operators-zhd9g\" (UID: \"45d40e5e-6214-433a-b5fe-14f767487c17\") " pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:01:56 crc kubenswrapper[4932]: I1125 09:01:56.980465 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d40e5e-6214-433a-b5fe-14f767487c17-catalog-content\") pod \"community-operators-zhd9g\" (UID: \"45d40e5e-6214-433a-b5fe-14f767487c17\") " pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:01:56 crc kubenswrapper[4932]: I1125 09:01:56.980966 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d40e5e-6214-433a-b5fe-14f767487c17-utilities\") pod \"community-operators-zhd9g\" (UID: \"45d40e5e-6214-433a-b5fe-14f767487c17\") " pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:01:56 crc kubenswrapper[4932]: I1125 09:01:56.981001 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d40e5e-6214-433a-b5fe-14f767487c17-catalog-content\") pod \"community-operators-zhd9g\" (UID: \"45d40e5e-6214-433a-b5fe-14f767487c17\") " pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:01:57 crc kubenswrapper[4932]: I1125 09:01:57.013348 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-w2qxz\" (UniqueName: \"kubernetes.io/projected/45d40e5e-6214-433a-b5fe-14f767487c17-kube-api-access-w2qxz\") pod \"community-operators-zhd9g\" (UID: \"45d40e5e-6214-433a-b5fe-14f767487c17\") " pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:01:57 crc kubenswrapper[4932]: I1125 09:01:57.075065 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:01:57 crc kubenswrapper[4932]: I1125 09:01:57.559863 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zhd9g"] Nov 25 09:01:58 crc kubenswrapper[4932]: I1125 09:01:58.110966 4932 generic.go:334] "Generic (PLEG): container finished" podID="45d40e5e-6214-433a-b5fe-14f767487c17" containerID="abd38d1f72f88fabfe179f5739fc6168aa20da71eab7f5ccec7f5b0a226b5dcb" exitCode=0 Nov 25 09:01:58 crc kubenswrapper[4932]: I1125 09:01:58.111398 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhd9g" event={"ID":"45d40e5e-6214-433a-b5fe-14f767487c17","Type":"ContainerDied","Data":"abd38d1f72f88fabfe179f5739fc6168aa20da71eab7f5ccec7f5b0a226b5dcb"} Nov 25 09:01:58 crc kubenswrapper[4932]: I1125 09:01:58.111990 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhd9g" event={"ID":"45d40e5e-6214-433a-b5fe-14f767487c17","Type":"ContainerStarted","Data":"ab4cdc6129cc0833ba5ef84409c5c4157243c50d796bb766c79daa18b78eb17a"} Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.145731 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6"] Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.147167 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.149531 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.160048 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6"] Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.211415 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wf95q\" (UniqueName: \"kubernetes.io/projected/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-kube-api-access-wf95q\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6\" (UID: \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.211654 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6\" (UID: \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.211781 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6\" (UID: \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.312954 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6\" (UID: \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.313024 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6\" (UID: \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.313085 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wf95q\" (UniqueName: \"kubernetes.io/projected/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-kube-api-access-wf95q\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6\" (UID: \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.313734 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6\" (UID: \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.314794 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6\" (UID: \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.333260 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wf95q\" (UniqueName: \"kubernetes.io/projected/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-kube-api-access-wf95q\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6\" (UID: \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.472133 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" Nov 25 09:01:59 crc kubenswrapper[4932]: I1125 09:01:59.908362 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6"] Nov 25 09:02:00 crc kubenswrapper[4932]: I1125 09:02:00.128274 4932 generic.go:334] "Generic (PLEG): container finished" podID="45d40e5e-6214-433a-b5fe-14f767487c17" containerID="deab59c9880dcac5fafe1d9b9c80bea3c7fefa4f7c0627a60fb4bb4e416242dd" exitCode=0 Nov 25 09:02:00 crc kubenswrapper[4932]: I1125 09:02:00.128997 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhd9g" event={"ID":"45d40e5e-6214-433a-b5fe-14f767487c17","Type":"ContainerDied","Data":"deab59c9880dcac5fafe1d9b9c80bea3c7fefa4f7c0627a60fb4bb4e416242dd"} Nov 25 09:02:00 crc kubenswrapper[4932]: I1125 09:02:00.136782 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" event={"ID":"4ed07160-cea0-47ee-9609-b4f5e6ecacd2","Type":"ContainerStarted","Data":"c865a186bcc92095b65822c4e43f464842d2875fc5afaaa0a429d8368e6ec848"} Nov 25 09:02:01 crc kubenswrapper[4932]: I1125 09:02:01.144073 4932 generic.go:334] "Generic (PLEG): container finished" podID="4ed07160-cea0-47ee-9609-b4f5e6ecacd2" containerID="76cb26404e4f7e5d81ac8dffa53171c0949e7e77220a73c6f13b3872961e59c1" exitCode=0 Nov 25 09:02:01 crc kubenswrapper[4932]: I1125 09:02:01.144222 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" event={"ID":"4ed07160-cea0-47ee-9609-b4f5e6ecacd2","Type":"ContainerDied","Data":"76cb26404e4f7e5d81ac8dffa53171c0949e7e77220a73c6f13b3872961e59c1"} Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.099843 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-5kf8q" podUID="a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" containerName="console" containerID="cri-o://db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925" 
gracePeriod=15 Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.152574 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhd9g" event={"ID":"45d40e5e-6214-433a-b5fe-14f767487c17","Type":"ContainerStarted","Data":"83d00ca1fe38aca7f856f154bf187ea55b18e5ae3d8e899d757aefa81f819a0a"} Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.176970 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zhd9g" podStartSLOduration=3.360894204 podStartE2EDuration="6.176947888s" podCreationTimestamp="2025-11-25 09:01:56 +0000 UTC" firstStartedPulling="2025-11-25 09:01:58.112609313 +0000 UTC m=+778.238638876" lastFinishedPulling="2025-11-25 09:02:00.928662957 +0000 UTC m=+781.054692560" observedRunningTime="2025-11-25 09:02:02.174827404 +0000 UTC m=+782.300856967" watchObservedRunningTime="2025-11-25 09:02:02.176947888 +0000 UTC m=+782.302977451" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.641746 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5kf8q_a5c7ba70-572f-4a3b-a15f-c7bec9b45f49/console/0.log" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.641826 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.709215 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v6xnz"] Nov 25 09:02:02 crc kubenswrapper[4932]: E1125 09:02:02.709469 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" containerName="console" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.709483 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" containerName="console" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.709588 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" containerName="console" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.710348 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.724816 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v6xnz"] Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.761365 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czwl2\" (UniqueName: \"kubernetes.io/projected/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-kube-api-access-czwl2\") pod \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.761451 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-config\") pod \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.761498 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-trusted-ca-bundle\") pod \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.761921 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-service-ca\") pod \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.762023 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" (UID: "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.762040 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-config" (OuterVolumeSpecName: "console-config") pod "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" (UID: "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.762088 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-oauth-config\") pod \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.762153 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-oauth-serving-cert\") pod \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.762233 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-serving-cert\") pod \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\" (UID: \"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49\") " Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.762341 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-service-ca" (OuterVolumeSpecName: "service-ca") pod "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" (UID: "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.762567 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" (UID: "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.762777 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-utilities\") pod \"redhat-operators-v6xnz\" (UID: \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\") " pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.762826 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7lff\" (UniqueName: \"kubernetes.io/projected/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-kube-api-access-r7lff\") pod \"redhat-operators-v6xnz\" (UID: \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\") " pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.763026 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-catalog-content\") pod \"redhat-operators-v6xnz\" (UID: \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\") " pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.763121 4932 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.763142 4932 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.763157 4932 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.763171 4932 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.769926 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-kube-api-access-czwl2" (OuterVolumeSpecName: "kube-api-access-czwl2") pod "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" (UID: "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49"). InnerVolumeSpecName "kube-api-access-czwl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.771091 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" (UID: "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.771538 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" (UID: "a5c7ba70-572f-4a3b-a15f-c7bec9b45f49"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.864066 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7lff\" (UniqueName: \"kubernetes.io/projected/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-kube-api-access-r7lff\") pod \"redhat-operators-v6xnz\" (UID: \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\") " pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.864107 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-utilities\") pod \"redhat-operators-v6xnz\" (UID: \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\") " pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.864151 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-catalog-content\") pod \"redhat-operators-v6xnz\" (UID: \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\") " pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.864203 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czwl2\" (UniqueName: \"kubernetes.io/projected/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-kube-api-access-czwl2\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.864217 4932 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.864235 4932 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.864654 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-utilities\") pod \"redhat-operators-v6xnz\" (UID: \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\") " pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.864670 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-catalog-content\") pod \"redhat-operators-v6xnz\" (UID: \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\") " pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:02 crc kubenswrapper[4932]: I1125 09:02:02.881937 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7lff\" (UniqueName: \"kubernetes.io/projected/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-kube-api-access-r7lff\") pod 
\"redhat-operators-v6xnz\" (UID: \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\") " pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:03 crc kubenswrapper[4932]: I1125 09:02:03.028730 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:03 crc kubenswrapper[4932]: I1125 09:02:03.159078 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-5kf8q_a5c7ba70-572f-4a3b-a15f-c7bec9b45f49/console/0.log" Nov 25 09:02:03 crc kubenswrapper[4932]: I1125 09:02:03.159125 4932 generic.go:334] "Generic (PLEG): container finished" podID="a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" containerID="db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925" exitCode=2 Nov 25 09:02:03 crc kubenswrapper[4932]: I1125 09:02:03.159759 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-5kf8q" Nov 25 09:02:03 crc kubenswrapper[4932]: I1125 09:02:03.165679 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5kf8q" event={"ID":"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49","Type":"ContainerDied","Data":"db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925"} Nov 25 09:02:03 crc kubenswrapper[4932]: I1125 09:02:03.165743 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-5kf8q" event={"ID":"a5c7ba70-572f-4a3b-a15f-c7bec9b45f49","Type":"ContainerDied","Data":"9b9dfe4e22f03da03d3ebc25365c15fcc7aeb19d49453a1e1a09d619da00f71b"} Nov 25 09:02:03 crc kubenswrapper[4932]: I1125 09:02:03.165767 4932 scope.go:117] "RemoveContainer" containerID="db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925" Nov 25 09:02:03 crc kubenswrapper[4932]: I1125 09:02:03.189783 4932 scope.go:117] "RemoveContainer" containerID="db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925" Nov 25 09:02:03 crc kubenswrapper[4932]: E1125 09:02:03.190353 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925\": container with ID starting with db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925 not found: ID does not exist" containerID="db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925" Nov 25 09:02:03 crc kubenswrapper[4932]: I1125 09:02:03.190398 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925"} err="failed to get container status \"db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925\": rpc error: code = NotFound desc = could not find container \"db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925\": container with ID starting with db203d49f2da2e1deb0f38b799237845633357858ba12b9c8718488440675925 not found: ID does not exist" Nov 25 09:02:03 crc kubenswrapper[4932]: I1125 09:02:03.195748 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-5kf8q"] Nov 25 09:02:03 crc kubenswrapper[4932]: I1125 09:02:03.200373 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-5kf8q"] Nov 25 09:02:03 crc kubenswrapper[4932]: I1125 09:02:03.492584 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v6xnz"] Nov 25 09:02:04 
crc kubenswrapper[4932]: I1125 09:02:04.165715 4932 generic.go:334] "Generic (PLEG): container finished" podID="1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" containerID="538a23f1eb753f8e5121f920f427e517fcde50d3b36f415135903da47e12f590" exitCode=0
Nov 25 09:02:04 crc kubenswrapper[4932]: I1125 09:02:04.165764 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xnz" event={"ID":"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2","Type":"ContainerDied","Data":"538a23f1eb753f8e5121f920f427e517fcde50d3b36f415135903da47e12f590"}
Nov 25 09:02:04 crc kubenswrapper[4932]: I1125 09:02:04.166048 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xnz" event={"ID":"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2","Type":"ContainerStarted","Data":"9cbb8c75a343c30d1fdeceb9bd628082dac21d99a1cc786deb42fcb8e5747070"}
Nov 25 09:02:04 crc kubenswrapper[4932]: I1125 09:02:04.614861 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5c7ba70-572f-4a3b-a15f-c7bec9b45f49" path="/var/lib/kubelet/pods/a5c7ba70-572f-4a3b-a15f-c7bec9b45f49/volumes"
Nov 25 09:02:05 crc kubenswrapper[4932]: I1125 09:02:05.175907 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xnz" event={"ID":"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2","Type":"ContainerStarted","Data":"e19fd7bc3e2c7fbdc40682a2b9fc04c923d02f44e323ece01242b0d631d5db03"}
Nov 25 09:02:05 crc kubenswrapper[4932]: I1125 09:02:05.178485 4932 generic.go:334] "Generic (PLEG): container finished" podID="4ed07160-cea0-47ee-9609-b4f5e6ecacd2" containerID="138dad0e3015bebf2e0179610fecfd0653cbb5c9ad062eb67d82eec3aac1e8d4" exitCode=0
Nov 25 09:02:05 crc kubenswrapper[4932]: I1125 09:02:05.178533 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" event={"ID":"4ed07160-cea0-47ee-9609-b4f5e6ecacd2","Type":"ContainerDied","Data":"138dad0e3015bebf2e0179610fecfd0653cbb5c9ad062eb67d82eec3aac1e8d4"}
Nov 25 09:02:06 crc kubenswrapper[4932]: I1125 09:02:06.188547 4932 generic.go:334] "Generic (PLEG): container finished" podID="4ed07160-cea0-47ee-9609-b4f5e6ecacd2" containerID="9845ab62ab6b1305f1578d9eeb89c6b103baad33f607621d4e17a69b7d604232" exitCode=0
Nov 25 09:02:06 crc kubenswrapper[4932]: I1125 09:02:06.188633 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" event={"ID":"4ed07160-cea0-47ee-9609-b4f5e6ecacd2","Type":"ContainerDied","Data":"9845ab62ab6b1305f1578d9eeb89c6b103baad33f607621d4e17a69b7d604232"}
Nov 25 09:02:06 crc kubenswrapper[4932]: I1125 09:02:06.190356 4932 generic.go:334] "Generic (PLEG): container finished" podID="1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" containerID="e19fd7bc3e2c7fbdc40682a2b9fc04c923d02f44e323ece01242b0d631d5db03" exitCode=0
Nov 25 09:02:06 crc kubenswrapper[4932]: I1125 09:02:06.190384 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xnz" event={"ID":"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2","Type":"ContainerDied","Data":"e19fd7bc3e2c7fbdc40682a2b9fc04c923d02f44e323ece01242b0d631d5db03"}
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.076179 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zhd9g"
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.076590 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zhd9g"
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.124298 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zhd9g"
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.180768 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.181067 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.181253 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh"
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.181975 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"de649297e499e1a80fc45537977d7092776afd0add46df5f77009f80ee0893ea"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.182142 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://de649297e499e1a80fc45537977d7092776afd0add46df5f77009f80ee0893ea" gracePeriod=600
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.198250 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xnz" event={"ID":"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2","Type":"ContainerStarted","Data":"cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090"}
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.227258 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v6xnz" podStartSLOduration=2.759895726 podStartE2EDuration="5.227241575s" podCreationTimestamp="2025-11-25 09:02:02 +0000 UTC" firstStartedPulling="2025-11-25 09:02:04.176878912 +0000 UTC m=+784.302908475" lastFinishedPulling="2025-11-25 09:02:06.644224761 +0000 UTC m=+786.770254324" observedRunningTime="2025-11-25 09:02:07.226603768 +0000 UTC m=+787.352633331" watchObservedRunningTime="2025-11-25 09:02:07.227241575 +0000 UTC m=+787.353271138"
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.249286 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zhd9g"
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.512229 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6"
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.538578 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wf95q\" (UniqueName: \"kubernetes.io/projected/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-kube-api-access-wf95q\") pod \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\" (UID: \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\") "
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.538745 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-util\") pod \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\" (UID: \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\") "
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.538776 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-bundle\") pod \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\" (UID: \"4ed07160-cea0-47ee-9609-b4f5e6ecacd2\") "
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.539798 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-bundle" (OuterVolumeSpecName: "bundle") pod "4ed07160-cea0-47ee-9609-b4f5e6ecacd2" (UID: "4ed07160-cea0-47ee-9609-b4f5e6ecacd2"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.545910 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-kube-api-access-wf95q" (OuterVolumeSpecName: "kube-api-access-wf95q") pod "4ed07160-cea0-47ee-9609-b4f5e6ecacd2" (UID: "4ed07160-cea0-47ee-9609-b4f5e6ecacd2"). InnerVolumeSpecName "kube-api-access-wf95q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.551913 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-util" (OuterVolumeSpecName: "util") pod "4ed07160-cea0-47ee-9609-b4f5e6ecacd2" (UID: "4ed07160-cea0-47ee-9609-b4f5e6ecacd2"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.640236 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wf95q\" (UniqueName: \"kubernetes.io/projected/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-kube-api-access-wf95q\") on node \"crc\" DevicePath \"\""
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.640328 4932 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-util\") on node \"crc\" DevicePath \"\""
Nov 25 09:02:07 crc kubenswrapper[4932]: I1125 09:02:07.640346 4932 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4ed07160-cea0-47ee-9609-b4f5e6ecacd2-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:02:08 crc kubenswrapper[4932]: I1125 09:02:08.209537 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6" event={"ID":"4ed07160-cea0-47ee-9609-b4f5e6ecacd2","Type":"ContainerDied","Data":"c865a186bcc92095b65822c4e43f464842d2875fc5afaaa0a429d8368e6ec848"}
Nov 25 09:02:08 crc kubenswrapper[4932]: I1125 09:02:08.209865 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c865a186bcc92095b65822c4e43f464842d2875fc5afaaa0a429d8368e6ec848"
Nov 25 09:02:08 crc kubenswrapper[4932]: I1125 09:02:08.209585 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6hn7j6"
Nov 25 09:02:08 crc kubenswrapper[4932]: I1125 09:02:08.215213 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"de649297e499e1a80fc45537977d7092776afd0add46df5f77009f80ee0893ea"}
Nov 25 09:02:08 crc kubenswrapper[4932]: I1125 09:02:08.215277 4932 scope.go:117] "RemoveContainer" containerID="1014150dedd5450252a58cc05ec9112ff1e142db0b602232dc6a81197418f719"
Nov 25 09:02:08 crc kubenswrapper[4932]: I1125 09:02:08.215238 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="de649297e499e1a80fc45537977d7092776afd0add46df5f77009f80ee0893ea" exitCode=0
Nov 25 09:02:08 crc kubenswrapper[4932]: I1125 09:02:08.215897 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"91eb2e40d6f72fe209b50e6c986a543f1a5accc33bb0098951f158439d3b5195"}
Nov 25 09:02:10 crc kubenswrapper[4932]: I1125 09:02:10.697577 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zhd9g"]
Nov 25 09:02:10 crc kubenswrapper[4932]: I1125 09:02:10.698404 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zhd9g" podUID="45d40e5e-6214-433a-b5fe-14f767487c17" containerName="registry-server" containerID="cri-o://83d00ca1fe38aca7f856f154bf187ea55b18e5ae3d8e899d757aefa81f819a0a" gracePeriod=2
Nov 25 09:02:13 crc kubenswrapper[4932]: I1125 09:02:13.029304 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v6xnz"
Nov 25 09:02:13 crc kubenswrapper[4932]: I1125 09:02:13.029653 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v6xnz"
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:13 crc kubenswrapper[4932]: I1125 09:02:13.069447 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:13 crc kubenswrapper[4932]: I1125 09:02:13.297691 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.261500 4932 generic.go:334] "Generic (PLEG): container finished" podID="45d40e5e-6214-433a-b5fe-14f767487c17" containerID="83d00ca1fe38aca7f856f154bf187ea55b18e5ae3d8e899d757aefa81f819a0a" exitCode=0 Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.262330 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhd9g" event={"ID":"45d40e5e-6214-433a-b5fe-14f767487c17","Type":"ContainerDied","Data":"83d00ca1fe38aca7f856f154bf187ea55b18e5ae3d8e899d757aefa81f819a0a"} Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.300741 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v6xnz"] Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.338713 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.447131 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2qxz\" (UniqueName: \"kubernetes.io/projected/45d40e5e-6214-433a-b5fe-14f767487c17-kube-api-access-w2qxz\") pod \"45d40e5e-6214-433a-b5fe-14f767487c17\" (UID: \"45d40e5e-6214-433a-b5fe-14f767487c17\") " Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.447203 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d40e5e-6214-433a-b5fe-14f767487c17-catalog-content\") pod \"45d40e5e-6214-433a-b5fe-14f767487c17\" (UID: \"45d40e5e-6214-433a-b5fe-14f767487c17\") " Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.447238 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d40e5e-6214-433a-b5fe-14f767487c17-utilities\") pod \"45d40e5e-6214-433a-b5fe-14f767487c17\" (UID: \"45d40e5e-6214-433a-b5fe-14f767487c17\") " Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.448587 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45d40e5e-6214-433a-b5fe-14f767487c17-utilities" (OuterVolumeSpecName: "utilities") pod "45d40e5e-6214-433a-b5fe-14f767487c17" (UID: "45d40e5e-6214-433a-b5fe-14f767487c17"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.458498 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45d40e5e-6214-433a-b5fe-14f767487c17-kube-api-access-w2qxz" (OuterVolumeSpecName: "kube-api-access-w2qxz") pod "45d40e5e-6214-433a-b5fe-14f767487c17" (UID: "45d40e5e-6214-433a-b5fe-14f767487c17"). InnerVolumeSpecName "kube-api-access-w2qxz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.503538 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45d40e5e-6214-433a-b5fe-14f767487c17-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "45d40e5e-6214-433a-b5fe-14f767487c17" (UID: "45d40e5e-6214-433a-b5fe-14f767487c17"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.548939 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2qxz\" (UniqueName: \"kubernetes.io/projected/45d40e5e-6214-433a-b5fe-14f767487c17-kube-api-access-w2qxz\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.548983 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d40e5e-6214-433a-b5fe-14f767487c17-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.548995 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d40e5e-6214-433a-b5fe-14f767487c17-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.603059 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl"] Nov 25 09:02:14 crc kubenswrapper[4932]: E1125 09:02:14.603300 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ed07160-cea0-47ee-9609-b4f5e6ecacd2" containerName="extract" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.603313 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ed07160-cea0-47ee-9609-b4f5e6ecacd2" containerName="extract" Nov 25 09:02:14 crc kubenswrapper[4932]: E1125 09:02:14.603329 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d40e5e-6214-433a-b5fe-14f767487c17" containerName="extract-utilities" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.603336 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d40e5e-6214-433a-b5fe-14f767487c17" containerName="extract-utilities" Nov 25 09:02:14 crc kubenswrapper[4932]: E1125 09:02:14.603344 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ed07160-cea0-47ee-9609-b4f5e6ecacd2" containerName="util" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.603350 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ed07160-cea0-47ee-9609-b4f5e6ecacd2" containerName="util" Nov 25 09:02:14 crc kubenswrapper[4932]: E1125 09:02:14.603364 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d40e5e-6214-433a-b5fe-14f767487c17" containerName="registry-server" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.603369 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d40e5e-6214-433a-b5fe-14f767487c17" containerName="registry-server" Nov 25 09:02:14 crc kubenswrapper[4932]: E1125 09:02:14.603379 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ed07160-cea0-47ee-9609-b4f5e6ecacd2" containerName="pull" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.603384 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ed07160-cea0-47ee-9609-b4f5e6ecacd2" containerName="pull" Nov 25 09:02:14 crc kubenswrapper[4932]: E1125 09:02:14.603393 4932 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="45d40e5e-6214-433a-b5fe-14f767487c17" containerName="extract-content" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.603398 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d40e5e-6214-433a-b5fe-14f767487c17" containerName="extract-content" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.603490 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="45d40e5e-6214-433a-b5fe-14f767487c17" containerName="registry-server" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.603504 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ed07160-cea0-47ee-9609-b4f5e6ecacd2" containerName="extract" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.603974 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.606074 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.606272 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.607404 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.608430 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-275wq" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.609919 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.626894 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl"] Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.750937 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6wvk\" (UniqueName: \"kubernetes.io/projected/b7db3ea2-66e6-46f2-93b4-4c8405a1b566-kube-api-access-f6wvk\") pod \"metallb-operator-controller-manager-76dcd9496-2bqxl\" (UID: \"b7db3ea2-66e6-46f2-93b4-4c8405a1b566\") " pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.751030 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b7db3ea2-66e6-46f2-93b4-4c8405a1b566-apiservice-cert\") pod \"metallb-operator-controller-manager-76dcd9496-2bqxl\" (UID: \"b7db3ea2-66e6-46f2-93b4-4c8405a1b566\") " pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.751076 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b7db3ea2-66e6-46f2-93b4-4c8405a1b566-webhook-cert\") pod \"metallb-operator-controller-manager-76dcd9496-2bqxl\" (UID: \"b7db3ea2-66e6-46f2-93b4-4c8405a1b566\") " pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.852495 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6wvk\" 
(UniqueName: \"kubernetes.io/projected/b7db3ea2-66e6-46f2-93b4-4c8405a1b566-kube-api-access-f6wvk\") pod \"metallb-operator-controller-manager-76dcd9496-2bqxl\" (UID: \"b7db3ea2-66e6-46f2-93b4-4c8405a1b566\") " pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.853606 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b7db3ea2-66e6-46f2-93b4-4c8405a1b566-apiservice-cert\") pod \"metallb-operator-controller-manager-76dcd9496-2bqxl\" (UID: \"b7db3ea2-66e6-46f2-93b4-4c8405a1b566\") " pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.853735 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b7db3ea2-66e6-46f2-93b4-4c8405a1b566-webhook-cert\") pod \"metallb-operator-controller-manager-76dcd9496-2bqxl\" (UID: \"b7db3ea2-66e6-46f2-93b4-4c8405a1b566\") " pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.856280 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8"] Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.857026 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.858334 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/b7db3ea2-66e6-46f2-93b4-4c8405a1b566-webhook-cert\") pod \"metallb-operator-controller-manager-76dcd9496-2bqxl\" (UID: \"b7db3ea2-66e6-46f2-93b4-4c8405a1b566\") " pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.859417 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.859720 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.860589 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-q5zks" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.875089 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/b7db3ea2-66e6-46f2-93b4-4c8405a1b566-apiservice-cert\") pod \"metallb-operator-controller-manager-76dcd9496-2bqxl\" (UID: \"b7db3ea2-66e6-46f2-93b4-4c8405a1b566\") " pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.876056 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8"] Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.885332 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6wvk\" (UniqueName: \"kubernetes.io/projected/b7db3ea2-66e6-46f2-93b4-4c8405a1b566-kube-api-access-f6wvk\") pod \"metallb-operator-controller-manager-76dcd9496-2bqxl\" (UID: \"b7db3ea2-66e6-46f2-93b4-4c8405a1b566\") " 
pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.936030 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.956800 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c43a2905-37dc-4a7b-9667-aa6f85bc2efb-webhook-cert\") pod \"metallb-operator-webhook-server-dcc6f5b64-7t7w8\" (UID: \"c43a2905-37dc-4a7b-9667-aa6f85bc2efb\") " pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.956884 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6f7rt\" (UniqueName: \"kubernetes.io/projected/c43a2905-37dc-4a7b-9667-aa6f85bc2efb-kube-api-access-6f7rt\") pod \"metallb-operator-webhook-server-dcc6f5b64-7t7w8\" (UID: \"c43a2905-37dc-4a7b-9667-aa6f85bc2efb\") " pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" Nov 25 09:02:14 crc kubenswrapper[4932]: I1125 09:02:14.956960 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c43a2905-37dc-4a7b-9667-aa6f85bc2efb-apiservice-cert\") pod \"metallb-operator-webhook-server-dcc6f5b64-7t7w8\" (UID: \"c43a2905-37dc-4a7b-9667-aa6f85bc2efb\") " pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.058333 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c43a2905-37dc-4a7b-9667-aa6f85bc2efb-webhook-cert\") pod \"metallb-operator-webhook-server-dcc6f5b64-7t7w8\" (UID: \"c43a2905-37dc-4a7b-9667-aa6f85bc2efb\") " pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.058406 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6f7rt\" (UniqueName: \"kubernetes.io/projected/c43a2905-37dc-4a7b-9667-aa6f85bc2efb-kube-api-access-6f7rt\") pod \"metallb-operator-webhook-server-dcc6f5b64-7t7w8\" (UID: \"c43a2905-37dc-4a7b-9667-aa6f85bc2efb\") " pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.058922 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c43a2905-37dc-4a7b-9667-aa6f85bc2efb-apiservice-cert\") pod \"metallb-operator-webhook-server-dcc6f5b64-7t7w8\" (UID: \"c43a2905-37dc-4a7b-9667-aa6f85bc2efb\") " pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.065042 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c43a2905-37dc-4a7b-9667-aa6f85bc2efb-webhook-cert\") pod \"metallb-operator-webhook-server-dcc6f5b64-7t7w8\" (UID: \"c43a2905-37dc-4a7b-9667-aa6f85bc2efb\") " pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.065662 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/c43a2905-37dc-4a7b-9667-aa6f85bc2efb-apiservice-cert\") pod \"metallb-operator-webhook-server-dcc6f5b64-7t7w8\" (UID: \"c43a2905-37dc-4a7b-9667-aa6f85bc2efb\") " pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.083608 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6f7rt\" (UniqueName: \"kubernetes.io/projected/c43a2905-37dc-4a7b-9667-aa6f85bc2efb-kube-api-access-6f7rt\") pod \"metallb-operator-webhook-server-dcc6f5b64-7t7w8\" (UID: \"c43a2905-37dc-4a7b-9667-aa6f85bc2efb\") " pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.156664 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl"] Nov 25 09:02:15 crc kubenswrapper[4932]: W1125 09:02:15.161955 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb7db3ea2_66e6_46f2_93b4_4c8405a1b566.slice/crio-989aee59ac39c68f8f4877262997a9da5937d5a15921d6d24ce92d2a0001e3e8 WatchSource:0}: Error finding container 989aee59ac39c68f8f4877262997a9da5937d5a15921d6d24ce92d2a0001e3e8: Status 404 returned error can't find the container with id 989aee59ac39c68f8f4877262997a9da5937d5a15921d6d24ce92d2a0001e3e8 Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.216894 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.270769 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zhd9g" event={"ID":"45d40e5e-6214-433a-b5fe-14f767487c17","Type":"ContainerDied","Data":"ab4cdc6129cc0833ba5ef84409c5c4157243c50d796bb766c79daa18b78eb17a"} Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.270820 4932 scope.go:117] "RemoveContainer" containerID="83d00ca1fe38aca7f856f154bf187ea55b18e5ae3d8e899d757aefa81f819a0a" Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.270926 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zhd9g" Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.274516 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-v6xnz" podUID="1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" containerName="registry-server" containerID="cri-o://cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090" gracePeriod=2 Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.274597 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" event={"ID":"b7db3ea2-66e6-46f2-93b4-4c8405a1b566","Type":"ContainerStarted","Data":"989aee59ac39c68f8f4877262997a9da5937d5a15921d6d24ce92d2a0001e3e8"} Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.289925 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zhd9g"] Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.293287 4932 scope.go:117] "RemoveContainer" containerID="deab59c9880dcac5fafe1d9b9c80bea3c7fefa4f7c0627a60fb4bb4e416242dd" Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.293878 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zhd9g"] Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.359171 4932 scope.go:117] "RemoveContainer" containerID="abd38d1f72f88fabfe179f5739fc6168aa20da71eab7f5ccec7f5b0a226b5dcb" Nov 25 09:02:15 crc kubenswrapper[4932]: I1125 09:02:15.690203 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8"] Nov 25 09:02:15 crc kubenswrapper[4932]: W1125 09:02:15.702271 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc43a2905_37dc_4a7b_9667_aa6f85bc2efb.slice/crio-8be393fcbc4363603639c4641126ff510d2edb3ca2a85d4269bbab86a930594d WatchSource:0}: Error finding container 8be393fcbc4363603639c4641126ff510d2edb3ca2a85d4269bbab86a930594d: Status 404 returned error can't find the container with id 8be393fcbc4363603639c4641126ff510d2edb3ca2a85d4269bbab86a930594d Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.126968 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.283468 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" event={"ID":"c43a2905-37dc-4a7b-9667-aa6f85bc2efb","Type":"ContainerStarted","Data":"8be393fcbc4363603639c4641126ff510d2edb3ca2a85d4269bbab86a930594d"} Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.285373 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7lff\" (UniqueName: \"kubernetes.io/projected/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-kube-api-access-r7lff\") pod \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\" (UID: \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\") " Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.285462 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-utilities\") pod \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\" (UID: \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\") " Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.285534 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-catalog-content\") pod \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\" (UID: \"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2\") " Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.294206 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-utilities" (OuterVolumeSpecName: "utilities") pod "1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" (UID: "1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.305610 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-kube-api-access-r7lff" (OuterVolumeSpecName: "kube-api-access-r7lff") pod "1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" (UID: "1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2"). InnerVolumeSpecName "kube-api-access-r7lff". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.307599 4932 generic.go:334] "Generic (PLEG): container finished" podID="1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" containerID="cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090" exitCode=0 Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.307684 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xnz" event={"ID":"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2","Type":"ContainerDied","Data":"cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090"} Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.307735 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v6xnz" event={"ID":"1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2","Type":"ContainerDied","Data":"9cbb8c75a343c30d1fdeceb9bd628082dac21d99a1cc786deb42fcb8e5747070"} Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.307807 4932 scope.go:117] "RemoveContainer" containerID="cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.308335 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v6xnz" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.344362 4932 scope.go:117] "RemoveContainer" containerID="e19fd7bc3e2c7fbdc40682a2b9fc04c923d02f44e323ece01242b0d631d5db03" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.368247 4932 scope.go:117] "RemoveContainer" containerID="538a23f1eb753f8e5121f920f427e517fcde50d3b36f415135903da47e12f590" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.385028 4932 scope.go:117] "RemoveContainer" containerID="cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090" Nov 25 09:02:16 crc kubenswrapper[4932]: E1125 09:02:16.385581 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090\": container with ID starting with cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090 not found: ID does not exist" containerID="cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.385653 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090"} err="failed to get container status \"cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090\": rpc error: code = NotFound desc = could not find container \"cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090\": container with ID starting with cbab28616d691d5306b957e20b88492b8d5da6fb5a2e6d03b025154789583090 not found: ID does not exist" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.385688 4932 scope.go:117] "RemoveContainer" containerID="e19fd7bc3e2c7fbdc40682a2b9fc04c923d02f44e323ece01242b0d631d5db03" Nov 25 09:02:16 crc kubenswrapper[4932]: E1125 09:02:16.387043 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e19fd7bc3e2c7fbdc40682a2b9fc04c923d02f44e323ece01242b0d631d5db03\": container with ID starting with e19fd7bc3e2c7fbdc40682a2b9fc04c923d02f44e323ece01242b0d631d5db03 not found: ID does not exist" 
containerID="e19fd7bc3e2c7fbdc40682a2b9fc04c923d02f44e323ece01242b0d631d5db03" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.387077 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e19fd7bc3e2c7fbdc40682a2b9fc04c923d02f44e323ece01242b0d631d5db03"} err="failed to get container status \"e19fd7bc3e2c7fbdc40682a2b9fc04c923d02f44e323ece01242b0d631d5db03\": rpc error: code = NotFound desc = could not find container \"e19fd7bc3e2c7fbdc40682a2b9fc04c923d02f44e323ece01242b0d631d5db03\": container with ID starting with e19fd7bc3e2c7fbdc40682a2b9fc04c923d02f44e323ece01242b0d631d5db03 not found: ID does not exist" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.387100 4932 scope.go:117] "RemoveContainer" containerID="538a23f1eb753f8e5121f920f427e517fcde50d3b36f415135903da47e12f590" Nov 25 09:02:16 crc kubenswrapper[4932]: E1125 09:02:16.387432 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"538a23f1eb753f8e5121f920f427e517fcde50d3b36f415135903da47e12f590\": container with ID starting with 538a23f1eb753f8e5121f920f427e517fcde50d3b36f415135903da47e12f590 not found: ID does not exist" containerID="538a23f1eb753f8e5121f920f427e517fcde50d3b36f415135903da47e12f590" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.387506 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"538a23f1eb753f8e5121f920f427e517fcde50d3b36f415135903da47e12f590"} err="failed to get container status \"538a23f1eb753f8e5121f920f427e517fcde50d3b36f415135903da47e12f590\": rpc error: code = NotFound desc = could not find container \"538a23f1eb753f8e5121f920f427e517fcde50d3b36f415135903da47e12f590\": container with ID starting with 538a23f1eb753f8e5121f920f427e517fcde50d3b36f415135903da47e12f590 not found: ID does not exist" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.387621 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7lff\" (UniqueName: \"kubernetes.io/projected/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-kube-api-access-r7lff\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.387643 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.413569 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" (UID: "1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.488217 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.613379 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45d40e5e-6214-433a-b5fe-14f767487c17" path="/var/lib/kubelet/pods/45d40e5e-6214-433a-b5fe-14f767487c17/volumes" Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.651964 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v6xnz"] Nov 25 09:02:16 crc kubenswrapper[4932]: I1125 09:02:16.659819 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-v6xnz"] Nov 25 09:02:18 crc kubenswrapper[4932]: I1125 09:02:18.614558 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" path="/var/lib/kubelet/pods/1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2/volumes" Nov 25 09:02:21 crc kubenswrapper[4932]: I1125 09:02:21.345858 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" event={"ID":"c43a2905-37dc-4a7b-9667-aa6f85bc2efb","Type":"ContainerStarted","Data":"5dda0cb1ab9058cd7b1379197291252ac20d345c3ab736273d1bccd2884d47e8"} Nov 25 09:02:21 crc kubenswrapper[4932]: I1125 09:02:21.346490 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" Nov 25 09:02:21 crc kubenswrapper[4932]: I1125 09:02:21.348141 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" event={"ID":"b7db3ea2-66e6-46f2-93b4-4c8405a1b566","Type":"ContainerStarted","Data":"76a6ba4522af11237dab4ac8b9195c17bdf7234c5cf79f3c9640a6b9b084af9e"} Nov 25 09:02:21 crc kubenswrapper[4932]: I1125 09:02:21.348393 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:21 crc kubenswrapper[4932]: I1125 09:02:21.365163 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8" podStartSLOduration=2.725559266 podStartE2EDuration="7.365145066s" podCreationTimestamp="2025-11-25 09:02:14 +0000 UTC" firstStartedPulling="2025-11-25 09:02:15.708835893 +0000 UTC m=+795.834865456" lastFinishedPulling="2025-11-25 09:02:20.348421693 +0000 UTC m=+800.474451256" observedRunningTime="2025-11-25 09:02:21.361251077 +0000 UTC m=+801.487280640" watchObservedRunningTime="2025-11-25 09:02:21.365145066 +0000 UTC m=+801.491174629" Nov 25 09:02:21 crc kubenswrapper[4932]: I1125 09:02:21.388272 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" podStartSLOduration=2.28295738 podStartE2EDuration="7.388254583s" podCreationTimestamp="2025-11-25 09:02:14 +0000 UTC" firstStartedPulling="2025-11-25 09:02:15.165120568 +0000 UTC m=+795.291150131" lastFinishedPulling="2025-11-25 09:02:20.270417771 +0000 UTC m=+800.396447334" observedRunningTime="2025-11-25 09:02:21.385849792 +0000 UTC m=+801.511879365" watchObservedRunningTime="2025-11-25 09:02:21.388254583 +0000 UTC m=+801.514284146" Nov 
25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.705418 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6h7d9"] Nov 25 09:02:22 crc kubenswrapper[4932]: E1125 09:02:22.706127 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" containerName="extract-utilities" Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.706143 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" containerName="extract-utilities" Nov 25 09:02:22 crc kubenswrapper[4932]: E1125 09:02:22.706164 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" containerName="registry-server" Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.706172 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" containerName="registry-server" Nov 25 09:02:22 crc kubenswrapper[4932]: E1125 09:02:22.706181 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" containerName="extract-content" Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.706204 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" containerName="extract-content" Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.706317 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="1aa93aeb-c5b8-4bf1-9cb4-049f351cb0f2" containerName="registry-server" Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.707253 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6h7d9" Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.716840 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6h7d9"] Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.871456 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nc4m\" (UniqueName: \"kubernetes.io/projected/3a9c996b-e39a-4fe9-83c8-81622c145fa3-kube-api-access-6nc4m\") pod \"redhat-marketplace-6h7d9\" (UID: \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\") " pod="openshift-marketplace/redhat-marketplace-6h7d9" Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.871513 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a9c996b-e39a-4fe9-83c8-81622c145fa3-catalog-content\") pod \"redhat-marketplace-6h7d9\" (UID: \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\") " pod="openshift-marketplace/redhat-marketplace-6h7d9" Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.871573 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a9c996b-e39a-4fe9-83c8-81622c145fa3-utilities\") pod \"redhat-marketplace-6h7d9\" (UID: \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\") " pod="openshift-marketplace/redhat-marketplace-6h7d9" Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.972488 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a9c996b-e39a-4fe9-83c8-81622c145fa3-utilities\") pod \"redhat-marketplace-6h7d9\" (UID: \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\") " pod="openshift-marketplace/redhat-marketplace-6h7d9" 
Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.972608 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nc4m\" (UniqueName: \"kubernetes.io/projected/3a9c996b-e39a-4fe9-83c8-81622c145fa3-kube-api-access-6nc4m\") pod \"redhat-marketplace-6h7d9\" (UID: \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\") " pod="openshift-marketplace/redhat-marketplace-6h7d9"
Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.972635 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a9c996b-e39a-4fe9-83c8-81622c145fa3-catalog-content\") pod \"redhat-marketplace-6h7d9\" (UID: \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\") " pod="openshift-marketplace/redhat-marketplace-6h7d9"
Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.973016 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a9c996b-e39a-4fe9-83c8-81622c145fa3-utilities\") pod \"redhat-marketplace-6h7d9\" (UID: \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\") " pod="openshift-marketplace/redhat-marketplace-6h7d9"
Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.973132 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a9c996b-e39a-4fe9-83c8-81622c145fa3-catalog-content\") pod \"redhat-marketplace-6h7d9\" (UID: \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\") " pod="openshift-marketplace/redhat-marketplace-6h7d9"
Nov 25 09:02:22 crc kubenswrapper[4932]: I1125 09:02:22.994463 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nc4m\" (UniqueName: \"kubernetes.io/projected/3a9c996b-e39a-4fe9-83c8-81622c145fa3-kube-api-access-6nc4m\") pod \"redhat-marketplace-6h7d9\" (UID: \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\") " pod="openshift-marketplace/redhat-marketplace-6h7d9"
Nov 25 09:02:23 crc kubenswrapper[4932]: I1125 09:02:23.025940 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6h7d9"
Nov 25 09:02:23 crc kubenswrapper[4932]: I1125 09:02:23.465300 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6h7d9"]
Nov 25 09:02:24 crc kubenswrapper[4932]: I1125 09:02:24.366839 4932 generic.go:334] "Generic (PLEG): container finished" podID="3a9c996b-e39a-4fe9-83c8-81622c145fa3" containerID="414a2e39af89cdd1319189846bd171f7f84eec657032243e69a11b953fe5dc02" exitCode=0
Nov 25 09:02:24 crc kubenswrapper[4932]: I1125 09:02:24.366917 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6h7d9" event={"ID":"3a9c996b-e39a-4fe9-83c8-81622c145fa3","Type":"ContainerDied","Data":"414a2e39af89cdd1319189846bd171f7f84eec657032243e69a11b953fe5dc02"}
Nov 25 09:02:24 crc kubenswrapper[4932]: I1125 09:02:24.367136 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6h7d9" event={"ID":"3a9c996b-e39a-4fe9-83c8-81622c145fa3","Type":"ContainerStarted","Data":"e778de0125b9692ce30dfab01cab9c9174c8c3f8b3bbf84c7444cc66af68d2e1"}
Nov 25 09:02:26 crc kubenswrapper[4932]: I1125 09:02:26.382780 4932 generic.go:334] "Generic (PLEG): container finished" podID="3a9c996b-e39a-4fe9-83c8-81622c145fa3" containerID="a1af6fff513fdb74e719ed525bfc1dabac6e91c59aeca509cddf0adbc7953766" exitCode=0
Nov 25 09:02:26 crc kubenswrapper[4932]: I1125 09:02:26.382875 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6h7d9" event={"ID":"3a9c996b-e39a-4fe9-83c8-81622c145fa3","Type":"ContainerDied","Data":"a1af6fff513fdb74e719ed525bfc1dabac6e91c59aeca509cddf0adbc7953766"}
Nov 25 09:02:27 crc kubenswrapper[4932]: I1125 09:02:27.391299 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6h7d9" event={"ID":"3a9c996b-e39a-4fe9-83c8-81622c145fa3","Type":"ContainerStarted","Data":"9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743"}
Nov 25 09:02:27 crc kubenswrapper[4932]: I1125 09:02:27.409319 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6h7d9" podStartSLOduration=2.995414802 podStartE2EDuration="5.409302864s" podCreationTimestamp="2025-11-25 09:02:22 +0000 UTC" firstStartedPulling="2025-11-25 09:02:24.368455053 +0000 UTC m=+804.494484616" lastFinishedPulling="2025-11-25 09:02:26.782343115 +0000 UTC m=+806.908372678" observedRunningTime="2025-11-25 09:02:27.407523398 +0000 UTC m=+807.533552971" watchObservedRunningTime="2025-11-25 09:02:27.409302864 +0000 UTC m=+807.535332427"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.026948 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6h7d9"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.027441 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6h7d9"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.069915 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6h7d9"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.307723 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-75b46"]
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.309176 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75b46"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.322280 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-75b46"]
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.408876 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpc2q\" (UniqueName: \"kubernetes.io/projected/d863485f-e172-41ed-8e1b-f6c5b7dff720-kube-api-access-vpc2q\") pod \"certified-operators-75b46\" (UID: \"d863485f-e172-41ed-8e1b-f6c5b7dff720\") " pod="openshift-marketplace/certified-operators-75b46"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.408938 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d863485f-e172-41ed-8e1b-f6c5b7dff720-utilities\") pod \"certified-operators-75b46\" (UID: \"d863485f-e172-41ed-8e1b-f6c5b7dff720\") " pod="openshift-marketplace/certified-operators-75b46"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.409003 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d863485f-e172-41ed-8e1b-f6c5b7dff720-catalog-content\") pod \"certified-operators-75b46\" (UID: \"d863485f-e172-41ed-8e1b-f6c5b7dff720\") " pod="openshift-marketplace/certified-operators-75b46"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.470286 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6h7d9"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.510233 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpc2q\" (UniqueName: \"kubernetes.io/projected/d863485f-e172-41ed-8e1b-f6c5b7dff720-kube-api-access-vpc2q\") pod \"certified-operators-75b46\" (UID: \"d863485f-e172-41ed-8e1b-f6c5b7dff720\") " pod="openshift-marketplace/certified-operators-75b46"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.510313 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d863485f-e172-41ed-8e1b-f6c5b7dff720-utilities\") pod \"certified-operators-75b46\" (UID: \"d863485f-e172-41ed-8e1b-f6c5b7dff720\") " pod="openshift-marketplace/certified-operators-75b46"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.510376 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d863485f-e172-41ed-8e1b-f6c5b7dff720-catalog-content\") pod \"certified-operators-75b46\" (UID: \"d863485f-e172-41ed-8e1b-f6c5b7dff720\") " pod="openshift-marketplace/certified-operators-75b46"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.510921 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d863485f-e172-41ed-8e1b-f6c5b7dff720-catalog-content\") pod \"certified-operators-75b46\" (UID: \"d863485f-e172-41ed-8e1b-f6c5b7dff720\") " pod="openshift-marketplace/certified-operators-75b46"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.510984 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d863485f-e172-41ed-8e1b-f6c5b7dff720-utilities\") pod \"certified-operators-75b46\" (UID: \"d863485f-e172-41ed-8e1b-f6c5b7dff720\") " pod="openshift-marketplace/certified-operators-75b46"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.547463 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpc2q\" (UniqueName: \"kubernetes.io/projected/d863485f-e172-41ed-8e1b-f6c5b7dff720-kube-api-access-vpc2q\") pod \"certified-operators-75b46\" (UID: \"d863485f-e172-41ed-8e1b-f6c5b7dff720\") " pod="openshift-marketplace/certified-operators-75b46"
Nov 25 09:02:33 crc kubenswrapper[4932]: I1125 09:02:33.632502 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75b46"
Nov 25 09:02:34 crc kubenswrapper[4932]: I1125 09:02:34.125505 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-75b46"]
Nov 25 09:02:34 crc kubenswrapper[4932]: W1125 09:02:34.139584 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd863485f_e172_41ed_8e1b_f6c5b7dff720.slice/crio-6a141da8fc340a7bc73c4778002507a2493a93cd6375e8c501040e8eb3484b1b WatchSource:0}: Error finding container 6a141da8fc340a7bc73c4778002507a2493a93cd6375e8c501040e8eb3484b1b: Status 404 returned error can't find the container with id 6a141da8fc340a7bc73c4778002507a2493a93cd6375e8c501040e8eb3484b1b
Nov 25 09:02:34 crc kubenswrapper[4932]: I1125 09:02:34.431945 4932 generic.go:334] "Generic (PLEG): container finished" podID="d863485f-e172-41ed-8e1b-f6c5b7dff720" containerID="f2352e9bf3c56693488daca7e75f7f3315e15ea90be2f24696ce51df8ce09aed" exitCode=0
Nov 25 09:02:34 crc kubenswrapper[4932]: I1125 09:02:34.432027 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75b46" event={"ID":"d863485f-e172-41ed-8e1b-f6c5b7dff720","Type":"ContainerDied","Data":"f2352e9bf3c56693488daca7e75f7f3315e15ea90be2f24696ce51df8ce09aed"}
Nov 25 09:02:34 crc kubenswrapper[4932]: I1125 09:02:34.432086 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75b46" event={"ID":"d863485f-e172-41ed-8e1b-f6c5b7dff720","Type":"ContainerStarted","Data":"6a141da8fc340a7bc73c4778002507a2493a93cd6375e8c501040e8eb3484b1b"}
Nov 25 09:02:35 crc kubenswrapper[4932]: I1125 09:02:35.221985 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-dcc6f5b64-7t7w8"
Nov 25 09:02:36 crc kubenswrapper[4932]: I1125 09:02:36.496994 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6h7d9"]
Nov 25 09:02:36 crc kubenswrapper[4932]: I1125 09:02:36.498050 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6h7d9" podUID="3a9c996b-e39a-4fe9-83c8-81622c145fa3" containerName="registry-server" containerID="cri-o://9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743" gracePeriod=2
Nov 25 09:02:36 crc kubenswrapper[4932]: I1125 09:02:36.897268 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6h7d9"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6h7d9" Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.063052 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a9c996b-e39a-4fe9-83c8-81622c145fa3-catalog-content\") pod \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\" (UID: \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\") " Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.063140 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6nc4m\" (UniqueName: \"kubernetes.io/projected/3a9c996b-e39a-4fe9-83c8-81622c145fa3-kube-api-access-6nc4m\") pod \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\" (UID: \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\") " Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.063226 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a9c996b-e39a-4fe9-83c8-81622c145fa3-utilities\") pod \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\" (UID: \"3a9c996b-e39a-4fe9-83c8-81622c145fa3\") " Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.064062 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a9c996b-e39a-4fe9-83c8-81622c145fa3-utilities" (OuterVolumeSpecName: "utilities") pod "3a9c996b-e39a-4fe9-83c8-81622c145fa3" (UID: "3a9c996b-e39a-4fe9-83c8-81622c145fa3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.068976 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a9c996b-e39a-4fe9-83c8-81622c145fa3-kube-api-access-6nc4m" (OuterVolumeSpecName: "kube-api-access-6nc4m") pod "3a9c996b-e39a-4fe9-83c8-81622c145fa3" (UID: "3a9c996b-e39a-4fe9-83c8-81622c145fa3"). InnerVolumeSpecName "kube-api-access-6nc4m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.165099 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6nc4m\" (UniqueName: \"kubernetes.io/projected/3a9c996b-e39a-4fe9-83c8-81622c145fa3-kube-api-access-6nc4m\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.165432 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a9c996b-e39a-4fe9-83c8-81622c145fa3-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.390611 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a9c996b-e39a-4fe9-83c8-81622c145fa3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3a9c996b-e39a-4fe9-83c8-81622c145fa3" (UID: "3a9c996b-e39a-4fe9-83c8-81622c145fa3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.469033 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a9c996b-e39a-4fe9-83c8-81622c145fa3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.472669 4932 generic.go:334] "Generic (PLEG): container finished" podID="3a9c996b-e39a-4fe9-83c8-81622c145fa3" containerID="9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743" exitCode=0 Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.472722 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6h7d9" Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.472725 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6h7d9" event={"ID":"3a9c996b-e39a-4fe9-83c8-81622c145fa3","Type":"ContainerDied","Data":"9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743"} Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.472837 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6h7d9" event={"ID":"3a9c996b-e39a-4fe9-83c8-81622c145fa3","Type":"ContainerDied","Data":"e778de0125b9692ce30dfab01cab9c9174c8c3f8b3bbf84c7444cc66af68d2e1"} Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.472861 4932 scope.go:117] "RemoveContainer" containerID="9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743" Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.501618 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6h7d9"] Nov 25 09:02:37 crc kubenswrapper[4932]: I1125 09:02:37.504733 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6h7d9"] Nov 25 09:02:38 crc kubenswrapper[4932]: I1125 09:02:38.612965 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a9c996b-e39a-4fe9-83c8-81622c145fa3" path="/var/lib/kubelet/pods/3a9c996b-e39a-4fe9-83c8-81622c145fa3/volumes" Nov 25 09:02:38 crc kubenswrapper[4932]: I1125 09:02:38.739584 4932 scope.go:117] "RemoveContainer" containerID="a1af6fff513fdb74e719ed525bfc1dabac6e91c59aeca509cddf0adbc7953766" Nov 25 09:02:38 crc kubenswrapper[4932]: I1125 09:02:38.759565 4932 scope.go:117] "RemoveContainer" containerID="414a2e39af89cdd1319189846bd171f7f84eec657032243e69a11b953fe5dc02" Nov 25 09:02:38 crc kubenswrapper[4932]: I1125 09:02:38.793043 4932 scope.go:117] "RemoveContainer" containerID="9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743" Nov 25 09:02:38 crc kubenswrapper[4932]: E1125 09:02:38.794040 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743\": container with ID starting with 9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743 not found: ID does not exist" containerID="9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743" Nov 25 09:02:38 crc kubenswrapper[4932]: I1125 09:02:38.794103 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743"} err="failed to get container status \"9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743\": rpc error: code = 
NotFound desc = could not find container \"9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743\": container with ID starting with 9f0859e220c91d7e0ab18c1fe2b5960f84bacbeb8e1cd1400f5144295555f743 not found: ID does not exist" Nov 25 09:02:38 crc kubenswrapper[4932]: I1125 09:02:38.794135 4932 scope.go:117] "RemoveContainer" containerID="a1af6fff513fdb74e719ed525bfc1dabac6e91c59aeca509cddf0adbc7953766" Nov 25 09:02:38 crc kubenswrapper[4932]: E1125 09:02:38.794864 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1af6fff513fdb74e719ed525bfc1dabac6e91c59aeca509cddf0adbc7953766\": container with ID starting with a1af6fff513fdb74e719ed525bfc1dabac6e91c59aeca509cddf0adbc7953766 not found: ID does not exist" containerID="a1af6fff513fdb74e719ed525bfc1dabac6e91c59aeca509cddf0adbc7953766" Nov 25 09:02:38 crc kubenswrapper[4932]: I1125 09:02:38.794899 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1af6fff513fdb74e719ed525bfc1dabac6e91c59aeca509cddf0adbc7953766"} err="failed to get container status \"a1af6fff513fdb74e719ed525bfc1dabac6e91c59aeca509cddf0adbc7953766\": rpc error: code = NotFound desc = could not find container \"a1af6fff513fdb74e719ed525bfc1dabac6e91c59aeca509cddf0adbc7953766\": container with ID starting with a1af6fff513fdb74e719ed525bfc1dabac6e91c59aeca509cddf0adbc7953766 not found: ID does not exist" Nov 25 09:02:38 crc kubenswrapper[4932]: I1125 09:02:38.794921 4932 scope.go:117] "RemoveContainer" containerID="414a2e39af89cdd1319189846bd171f7f84eec657032243e69a11b953fe5dc02" Nov 25 09:02:38 crc kubenswrapper[4932]: E1125 09:02:38.795344 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"414a2e39af89cdd1319189846bd171f7f84eec657032243e69a11b953fe5dc02\": container with ID starting with 414a2e39af89cdd1319189846bd171f7f84eec657032243e69a11b953fe5dc02 not found: ID does not exist" containerID="414a2e39af89cdd1319189846bd171f7f84eec657032243e69a11b953fe5dc02" Nov 25 09:02:38 crc kubenswrapper[4932]: I1125 09:02:38.795383 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"414a2e39af89cdd1319189846bd171f7f84eec657032243e69a11b953fe5dc02"} err="failed to get container status \"414a2e39af89cdd1319189846bd171f7f84eec657032243e69a11b953fe5dc02\": rpc error: code = NotFound desc = could not find container \"414a2e39af89cdd1319189846bd171f7f84eec657032243e69a11b953fe5dc02\": container with ID starting with 414a2e39af89cdd1319189846bd171f7f84eec657032243e69a11b953fe5dc02 not found: ID does not exist" Nov 25 09:02:40 crc kubenswrapper[4932]: I1125 09:02:40.496832 4932 generic.go:334] "Generic (PLEG): container finished" podID="d863485f-e172-41ed-8e1b-f6c5b7dff720" containerID="064dd6852db26de1003dac7f883f290bc2d1d5d10158cc51224f24525380d824" exitCode=0 Nov 25 09:02:40 crc kubenswrapper[4932]: I1125 09:02:40.496897 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75b46" event={"ID":"d863485f-e172-41ed-8e1b-f6c5b7dff720","Type":"ContainerDied","Data":"064dd6852db26de1003dac7f883f290bc2d1d5d10158cc51224f24525380d824"} Nov 25 09:02:42 crc kubenswrapper[4932]: I1125 09:02:42.510288 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75b46" 
event={"ID":"d863485f-e172-41ed-8e1b-f6c5b7dff720","Type":"ContainerStarted","Data":"a9dadaf9243be34c108cd870b63bfe1639007dae5068920fdc7877a8e59d336f"} Nov 25 09:02:42 crc kubenswrapper[4932]: I1125 09:02:42.536134 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-75b46" podStartSLOduration=2.66844948 podStartE2EDuration="9.536118951s" podCreationTimestamp="2025-11-25 09:02:33 +0000 UTC" firstStartedPulling="2025-11-25 09:02:34.433566794 +0000 UTC m=+814.559596357" lastFinishedPulling="2025-11-25 09:02:41.301236275 +0000 UTC m=+821.427265828" observedRunningTime="2025-11-25 09:02:42.533812922 +0000 UTC m=+822.659842485" watchObservedRunningTime="2025-11-25 09:02:42.536118951 +0000 UTC m=+822.662148514" Nov 25 09:02:43 crc kubenswrapper[4932]: I1125 09:02:43.633100 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-75b46" Nov 25 09:02:43 crc kubenswrapper[4932]: I1125 09:02:43.633162 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-75b46" Nov 25 09:02:43 crc kubenswrapper[4932]: I1125 09:02:43.675753 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-75b46" Nov 25 09:02:53 crc kubenswrapper[4932]: I1125 09:02:53.677402 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-75b46" Nov 25 09:02:54 crc kubenswrapper[4932]: I1125 09:02:54.938559 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.614905 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb"] Nov 25 09:02:55 crc kubenswrapper[4932]: E1125 09:02:55.615440 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a9c996b-e39a-4fe9-83c8-81622c145fa3" containerName="extract-utilities" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.615452 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a9c996b-e39a-4fe9-83c8-81622c145fa3" containerName="extract-utilities" Nov 25 09:02:55 crc kubenswrapper[4932]: E1125 09:02:55.615465 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a9c996b-e39a-4fe9-83c8-81622c145fa3" containerName="registry-server" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.615471 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a9c996b-e39a-4fe9-83c8-81622c145fa3" containerName="registry-server" Nov 25 09:02:55 crc kubenswrapper[4932]: E1125 09:02:55.615484 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a9c996b-e39a-4fe9-83c8-81622c145fa3" containerName="extract-content" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.615490 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a9c996b-e39a-4fe9-83c8-81622c145fa3" containerName="extract-content" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.615589 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a9c996b-e39a-4fe9-83c8-81622c145fa3" containerName="registry-server" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.615944 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.620837 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.620880 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-7mqtl" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.635502 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb"] Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.639910 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-2vpc5"] Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.642154 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.643707 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.643949 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.656334 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6qlh\" (UniqueName: \"kubernetes.io/projected/90887994-5f04-4a8a-abd7-6e6e6d1240f4-kube-api-access-g6qlh\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.656377 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/90887994-5f04-4a8a-abd7-6e6e6d1240f4-frr-startup\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.656430 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7bd05772-0291-41f9-9a20-9eee4249c7a9-cert\") pod \"frr-k8s-webhook-server-6998585d5-zqpkb\" (UID: \"7bd05772-0291-41f9-9a20-9eee4249c7a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.656458 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5x6l\" (UniqueName: \"kubernetes.io/projected/7bd05772-0291-41f9-9a20-9eee4249c7a9-kube-api-access-g5x6l\") pod \"frr-k8s-webhook-server-6998585d5-zqpkb\" (UID: \"7bd05772-0291-41f9-9a20-9eee4249c7a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.656481 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/90887994-5f04-4a8a-abd7-6e6e6d1240f4-metrics-certs\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.656510 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: 
\"kubernetes.io/empty-dir/90887994-5f04-4a8a-abd7-6e6e6d1240f4-frr-sockets\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.656531 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/90887994-5f04-4a8a-abd7-6e6e6d1240f4-frr-conf\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.656554 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/90887994-5f04-4a8a-abd7-6e6e6d1240f4-metrics\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.656638 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/90887994-5f04-4a8a-abd7-6e6e6d1240f4-reloader\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.717902 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-2b5bv"] Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.718986 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-2b5bv" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.722769 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.722797 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.722830 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.723597 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-nxgm7" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.731842 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-75b46"] Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.745577 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-glq9k"] Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.746684 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.749483 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757361 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6qlh\" (UniqueName: \"kubernetes.io/projected/90887994-5f04-4a8a-abd7-6e6e6d1240f4-kube-api-access-g6qlh\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757410 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/90887994-5f04-4a8a-abd7-6e6e6d1240f4-frr-startup\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757449 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-memberlist\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757484 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7bd05772-0291-41f9-9a20-9eee4249c7a9-cert\") pod \"frr-k8s-webhook-server-6998585d5-zqpkb\" (UID: \"7bd05772-0291-41f9-9a20-9eee4249c7a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757513 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5x6l\" (UniqueName: \"kubernetes.io/projected/7bd05772-0291-41f9-9a20-9eee4249c7a9-kube-api-access-g5x6l\") pod \"frr-k8s-webhook-server-6998585d5-zqpkb\" (UID: \"7bd05772-0291-41f9-9a20-9eee4249c7a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757535 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/90887994-5f04-4a8a-abd7-6e6e6d1240f4-metrics-certs\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757560 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/97beb067-4906-47af-810e-e3ddfc2d5ba1-metallb-excludel2\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757589 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/90887994-5f04-4a8a-abd7-6e6e6d1240f4-frr-sockets\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757611 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/90887994-5f04-4a8a-abd7-6e6e6d1240f4-frr-conf\") pod \"frr-k8s-2vpc5\" (UID: 
\"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757651 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/90887994-5f04-4a8a-abd7-6e6e6d1240f4-metrics\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757692 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-metrics-certs\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757729 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vpdp\" (UniqueName: \"kubernetes.io/projected/97beb067-4906-47af-810e-e3ddfc2d5ba1-kube-api-access-7vpdp\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.757789 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/90887994-5f04-4a8a-abd7-6e6e6d1240f4-reloader\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: E1125 09:02:55.757939 4932 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Nov 25 09:02:55 crc kubenswrapper[4932]: E1125 09:02:55.757990 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7bd05772-0291-41f9-9a20-9eee4249c7a9-cert podName:7bd05772-0291-41f9-9a20-9eee4249c7a9 nodeName:}" failed. No retries permitted until 2025-11-25 09:02:56.257971269 +0000 UTC m=+836.384000922 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/7bd05772-0291-41f9-9a20-9eee4249c7a9-cert") pod "frr-k8s-webhook-server-6998585d5-zqpkb" (UID: "7bd05772-0291-41f9-9a20-9eee4249c7a9") : secret "frr-k8s-webhook-server-cert" not found Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.758238 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/90887994-5f04-4a8a-abd7-6e6e6d1240f4-reloader\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: E1125 09:02:55.758415 4932 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Nov 25 09:02:55 crc kubenswrapper[4932]: E1125 09:02:55.758563 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/90887994-5f04-4a8a-abd7-6e6e6d1240f4-metrics-certs podName:90887994-5f04-4a8a-abd7-6e6e6d1240f4 nodeName:}" failed. No retries permitted until 2025-11-25 09:02:56.258537613 +0000 UTC m=+836.384567246 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/90887994-5f04-4a8a-abd7-6e6e6d1240f4-metrics-certs") pod "frr-k8s-2vpc5" (UID: "90887994-5f04-4a8a-abd7-6e6e6d1240f4") : secret "frr-k8s-certs-secret" not found Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.758455 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/90887994-5f04-4a8a-abd7-6e6e6d1240f4-frr-conf\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.758599 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/90887994-5f04-4a8a-abd7-6e6e6d1240f4-metrics\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.758723 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/90887994-5f04-4a8a-abd7-6e6e6d1240f4-frr-sockets\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.758928 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/90887994-5f04-4a8a-abd7-6e6e6d1240f4-frr-startup\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.765811 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-glq9k"] Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.785127 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6qlh\" (UniqueName: \"kubernetes.io/projected/90887994-5f04-4a8a-abd7-6e6e6d1240f4-kube-api-access-g6qlh\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.785159 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5x6l\" (UniqueName: \"kubernetes.io/projected/7bd05772-0291-41f9-9a20-9eee4249c7a9-kube-api-access-g5x6l\") pod \"frr-k8s-webhook-server-6998585d5-zqpkb\" (UID: \"7bd05772-0291-41f9-9a20-9eee4249c7a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.868959 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4bac6992-e5a0-4ac7-aa2b-8606644a36ef-metrics-certs\") pod \"controller-6c7b4b5f48-glq9k\" (UID: \"4bac6992-e5a0-4ac7-aa2b-8606644a36ef\") " pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.869071 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-memberlist\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:55 crc kubenswrapper[4932]: E1125 09:02:55.869177 4932 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 09:02:55 crc 
kubenswrapper[4932]: E1125 09:02:55.869241 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-memberlist podName:97beb067-4906-47af-810e-e3ddfc2d5ba1 nodeName:}" failed. No retries permitted until 2025-11-25 09:02:56.369226265 +0000 UTC m=+836.495255828 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-memberlist") pod "speaker-2b5bv" (UID: "97beb067-4906-47af-810e-e3ddfc2d5ba1") : secret "metallb-memberlist" not found Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.870705 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/97beb067-4906-47af-810e-e3ddfc2d5ba1-metallb-excludel2\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.870808 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-metrics-certs\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.870840 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltwpm\" (UniqueName: \"kubernetes.io/projected/4bac6992-e5a0-4ac7-aa2b-8606644a36ef-kube-api-access-ltwpm\") pod \"controller-6c7b4b5f48-glq9k\" (UID: \"4bac6992-e5a0-4ac7-aa2b-8606644a36ef\") " pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.870862 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4bac6992-e5a0-4ac7-aa2b-8606644a36ef-cert\") pod \"controller-6c7b4b5f48-glq9k\" (UID: \"4bac6992-e5a0-4ac7-aa2b-8606644a36ef\") " pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.870890 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vpdp\" (UniqueName: \"kubernetes.io/projected/97beb067-4906-47af-810e-e3ddfc2d5ba1-kube-api-access-7vpdp\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:55 crc kubenswrapper[4932]: E1125 09:02:55.871286 4932 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 25 09:02:55 crc kubenswrapper[4932]: E1125 09:02:55.871320 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-metrics-certs podName:97beb067-4906-47af-810e-e3ddfc2d5ba1 nodeName:}" failed. No retries permitted until 2025-11-25 09:02:56.371311968 +0000 UTC m=+836.497341521 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-metrics-certs") pod "speaker-2b5bv" (UID: "97beb067-4906-47af-810e-e3ddfc2d5ba1") : secret "speaker-certs-secret" not found Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.873527 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/97beb067-4906-47af-810e-e3ddfc2d5ba1-metallb-excludel2\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.899222 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vpdp\" (UniqueName: \"kubernetes.io/projected/97beb067-4906-47af-810e-e3ddfc2d5ba1-kube-api-access-7vpdp\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.973269 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltwpm\" (UniqueName: \"kubernetes.io/projected/4bac6992-e5a0-4ac7-aa2b-8606644a36ef-kube-api-access-ltwpm\") pod \"controller-6c7b4b5f48-glq9k\" (UID: \"4bac6992-e5a0-4ac7-aa2b-8606644a36ef\") " pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.973319 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4bac6992-e5a0-4ac7-aa2b-8606644a36ef-cert\") pod \"controller-6c7b4b5f48-glq9k\" (UID: \"4bac6992-e5a0-4ac7-aa2b-8606644a36ef\") " pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.973395 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4bac6992-e5a0-4ac7-aa2b-8606644a36ef-metrics-certs\") pod \"controller-6c7b4b5f48-glq9k\" (UID: \"4bac6992-e5a0-4ac7-aa2b-8606644a36ef\") " pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.977114 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4bac6992-e5a0-4ac7-aa2b-8606644a36ef-metrics-certs\") pod \"controller-6c7b4b5f48-glq9k\" (UID: \"4bac6992-e5a0-4ac7-aa2b-8606644a36ef\") " pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.985159 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4bac6992-e5a0-4ac7-aa2b-8606644a36ef-cert\") pod \"controller-6c7b4b5f48-glq9k\" (UID: \"4bac6992-e5a0-4ac7-aa2b-8606644a36ef\") " pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:02:55 crc kubenswrapper[4932]: I1125 09:02:55.998985 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltwpm\" (UniqueName: \"kubernetes.io/projected/4bac6992-e5a0-4ac7-aa2b-8606644a36ef-kube-api-access-ltwpm\") pod \"controller-6c7b4b5f48-glq9k\" (UID: \"4bac6992-e5a0-4ac7-aa2b-8606644a36ef\") " pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.061281 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.102338 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bfvv5"] Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.102610 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bfvv5" podUID="7af73cde-771e-4ea0-b5c1-549cdee5181f" containerName="registry-server" containerID="cri-o://e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7" gracePeriod=2 Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.277112 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7bd05772-0291-41f9-9a20-9eee4249c7a9-cert\") pod \"frr-k8s-webhook-server-6998585d5-zqpkb\" (UID: \"7bd05772-0291-41f9-9a20-9eee4249c7a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.277496 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/90887994-5f04-4a8a-abd7-6e6e6d1240f4-metrics-certs\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.282517 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/90887994-5f04-4a8a-abd7-6e6e6d1240f4-metrics-certs\") pod \"frr-k8s-2vpc5\" (UID: \"90887994-5f04-4a8a-abd7-6e6e6d1240f4\") " pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.282634 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7bd05772-0291-41f9-9a20-9eee4249c7a9-cert\") pod \"frr-k8s-webhook-server-6998585d5-zqpkb\" (UID: \"7bd05772-0291-41f9-9a20-9eee4249c7a9\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.379049 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-memberlist\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.379131 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-metrics-certs\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:56 crc kubenswrapper[4932]: E1125 09:02:56.379252 4932 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 25 09:02:56 crc kubenswrapper[4932]: E1125 09:02:56.379321 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-memberlist podName:97beb067-4906-47af-810e-e3ddfc2d5ba1 nodeName:}" failed. No retries permitted until 2025-11-25 09:02:57.379302235 +0000 UTC m=+837.505331798 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-memberlist") pod "speaker-2b5bv" (UID: "97beb067-4906-47af-810e-e3ddfc2d5ba1") : secret "metallb-memberlist" not found Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.383506 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-metrics-certs\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.505785 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.535871 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.559146 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-2vpc5" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.580949 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7af73cde-771e-4ea0-b5c1-549cdee5181f-catalog-content\") pod \"7af73cde-771e-4ea0-b5c1-549cdee5181f\" (UID: \"7af73cde-771e-4ea0-b5c1-549cdee5181f\") " Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.581062 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7af73cde-771e-4ea0-b5c1-549cdee5181f-utilities\") pod \"7af73cde-771e-4ea0-b5c1-549cdee5181f\" (UID: \"7af73cde-771e-4ea0-b5c1-549cdee5181f\") " Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.581137 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zzg8\" (UniqueName: \"kubernetes.io/projected/7af73cde-771e-4ea0-b5c1-549cdee5181f-kube-api-access-5zzg8\") pod \"7af73cde-771e-4ea0-b5c1-549cdee5181f\" (UID: \"7af73cde-771e-4ea0-b5c1-549cdee5181f\") " Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.583220 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7af73cde-771e-4ea0-b5c1-549cdee5181f-utilities" (OuterVolumeSpecName: "utilities") pod "7af73cde-771e-4ea0-b5c1-549cdee5181f" (UID: "7af73cde-771e-4ea0-b5c1-549cdee5181f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.586886 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-glq9k"] Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.587653 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7af73cde-771e-4ea0-b5c1-549cdee5181f-kube-api-access-5zzg8" (OuterVolumeSpecName: "kube-api-access-5zzg8") pod "7af73cde-771e-4ea0-b5c1-549cdee5181f" (UID: "7af73cde-771e-4ea0-b5c1-549cdee5181f"). InnerVolumeSpecName "kube-api-access-5zzg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.598132 4932 generic.go:334] "Generic (PLEG): container finished" podID="7af73cde-771e-4ea0-b5c1-549cdee5181f" containerID="e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7" exitCode=0 Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.598175 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfvv5" event={"ID":"7af73cde-771e-4ea0-b5c1-549cdee5181f","Type":"ContainerDied","Data":"e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7"} Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.598216 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bfvv5" event={"ID":"7af73cde-771e-4ea0-b5c1-549cdee5181f","Type":"ContainerDied","Data":"da06a75c1280a801b30717a498efc51e89fab0ccd41824475f4331c540d78a1f"} Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.598228 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bfvv5" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.598234 4932 scope.go:117] "RemoveContainer" containerID="e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.623398 4932 scope.go:117] "RemoveContainer" containerID="41c5dc15e64630cc33771931faf52585cf9024d30b0816432800545763b2e029" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.652782 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7af73cde-771e-4ea0-b5c1-549cdee5181f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7af73cde-771e-4ea0-b5c1-549cdee5181f" (UID: "7af73cde-771e-4ea0-b5c1-549cdee5181f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.665782 4932 scope.go:117] "RemoveContainer" containerID="70fd2fadf1d54985bb09a0fd5d68be422d3360a05690aeb0c66d6c926a433387" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.685084 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7af73cde-771e-4ea0-b5c1-549cdee5181f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.685111 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zzg8\" (UniqueName: \"kubernetes.io/projected/7af73cde-771e-4ea0-b5c1-549cdee5181f-kube-api-access-5zzg8\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.685121 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7af73cde-771e-4ea0-b5c1-549cdee5181f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.690726 4932 scope.go:117] "RemoveContainer" containerID="e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7" Nov 25 09:02:56 crc kubenswrapper[4932]: E1125 09:02:56.695821 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7\": container with ID starting with e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7 not found: ID does not exist" containerID="e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.695878 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7"} err="failed to get container status \"e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7\": rpc error: code = NotFound desc = could not find container \"e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7\": container with ID starting with e997a01936dfbc2913e5b4cf50a3cf2f9240abf892bfc9fceac98d6dd50bb2d7 not found: ID does not exist" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.695913 4932 scope.go:117] "RemoveContainer" containerID="41c5dc15e64630cc33771931faf52585cf9024d30b0816432800545763b2e029" Nov 25 09:02:56 crc kubenswrapper[4932]: E1125 09:02:56.699210 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41c5dc15e64630cc33771931faf52585cf9024d30b0816432800545763b2e029\": container with ID starting with 41c5dc15e64630cc33771931faf52585cf9024d30b0816432800545763b2e029 not found: ID does not exist" containerID="41c5dc15e64630cc33771931faf52585cf9024d30b0816432800545763b2e029" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.699278 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41c5dc15e64630cc33771931faf52585cf9024d30b0816432800545763b2e029"} err="failed to get container status \"41c5dc15e64630cc33771931faf52585cf9024d30b0816432800545763b2e029\": rpc error: code = NotFound desc = could not find container \"41c5dc15e64630cc33771931faf52585cf9024d30b0816432800545763b2e029\": container with ID starting with 41c5dc15e64630cc33771931faf52585cf9024d30b0816432800545763b2e029 not found: ID does not exist" Nov 25 09:02:56 crc 
kubenswrapper[4932]: I1125 09:02:56.699318 4932 scope.go:117] "RemoveContainer" containerID="70fd2fadf1d54985bb09a0fd5d68be422d3360a05690aeb0c66d6c926a433387" Nov 25 09:02:56 crc kubenswrapper[4932]: E1125 09:02:56.701155 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70fd2fadf1d54985bb09a0fd5d68be422d3360a05690aeb0c66d6c926a433387\": container with ID starting with 70fd2fadf1d54985bb09a0fd5d68be422d3360a05690aeb0c66d6c926a433387 not found: ID does not exist" containerID="70fd2fadf1d54985bb09a0fd5d68be422d3360a05690aeb0c66d6c926a433387" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.701211 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70fd2fadf1d54985bb09a0fd5d68be422d3360a05690aeb0c66d6c926a433387"} err="failed to get container status \"70fd2fadf1d54985bb09a0fd5d68be422d3360a05690aeb0c66d6c926a433387\": rpc error: code = NotFound desc = could not find container \"70fd2fadf1d54985bb09a0fd5d68be422d3360a05690aeb0c66d6c926a433387\": container with ID starting with 70fd2fadf1d54985bb09a0fd5d68be422d3360a05690aeb0c66d6c926a433387 not found: ID does not exist" Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.932131 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bfvv5"] Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.937245 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bfvv5"] Nov 25 09:02:56 crc kubenswrapper[4932]: I1125 09:02:56.969539 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb"] Nov 25 09:02:57 crc kubenswrapper[4932]: I1125 09:02:57.406608 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-memberlist\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:57 crc kubenswrapper[4932]: I1125 09:02:57.415034 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/97beb067-4906-47af-810e-e3ddfc2d5ba1-memberlist\") pod \"speaker-2b5bv\" (UID: \"97beb067-4906-47af-810e-e3ddfc2d5ba1\") " pod="metallb-system/speaker-2b5bv" Nov 25 09:02:57 crc kubenswrapper[4932]: I1125 09:02:57.541448 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-2b5bv" Nov 25 09:02:57 crc kubenswrapper[4932]: W1125 09:02:57.569445 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod97beb067_4906_47af_810e_e3ddfc2d5ba1.slice/crio-494e6a185b69ca81d138444b0ea67f036f59ba80f6a7176448961cc0d7fdc5bd WatchSource:0}: Error finding container 494e6a185b69ca81d138444b0ea67f036f59ba80f6a7176448961cc0d7fdc5bd: Status 404 returned error can't find the container with id 494e6a185b69ca81d138444b0ea67f036f59ba80f6a7176448961cc0d7fdc5bd Nov 25 09:02:57 crc kubenswrapper[4932]: I1125 09:02:57.605748 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" event={"ID":"7bd05772-0291-41f9-9a20-9eee4249c7a9","Type":"ContainerStarted","Data":"49b671526db46193d34ccc77b57d23c3d01d318cfae76a9f00dad18fb56e78d1"} Nov 25 09:02:57 crc kubenswrapper[4932]: I1125 09:02:57.607550 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2vpc5" event={"ID":"90887994-5f04-4a8a-abd7-6e6e6d1240f4","Type":"ContainerStarted","Data":"e8850a7ad482c9dc78af271a9ae03c4b8534c28f9d7e72f40e91091a62fe7afb"} Nov 25 09:02:57 crc kubenswrapper[4932]: I1125 09:02:57.613391 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2b5bv" event={"ID":"97beb067-4906-47af-810e-e3ddfc2d5ba1","Type":"ContainerStarted","Data":"494e6a185b69ca81d138444b0ea67f036f59ba80f6a7176448961cc0d7fdc5bd"} Nov 25 09:02:57 crc kubenswrapper[4932]: I1125 09:02:57.615681 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-glq9k" event={"ID":"4bac6992-e5a0-4ac7-aa2b-8606644a36ef","Type":"ContainerStarted","Data":"157f9dbc102667e248afb5be683bbe8ac565cb19dbdeffeda67d41f0344110d9"} Nov 25 09:02:57 crc kubenswrapper[4932]: I1125 09:02:57.615706 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-glq9k" event={"ID":"4bac6992-e5a0-4ac7-aa2b-8606644a36ef","Type":"ContainerStarted","Data":"a5194a614ba53f173a1dd3dbeda9474840765a7ff42443390ebb7622fb49ef04"} Nov 25 09:02:57 crc kubenswrapper[4932]: I1125 09:02:57.615719 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-glq9k" event={"ID":"4bac6992-e5a0-4ac7-aa2b-8606644a36ef","Type":"ContainerStarted","Data":"cdabfca3293d206dba8330aa4b8ba21e500e1b3748d1e00018bc269951e3eea3"} Nov 25 09:02:57 crc kubenswrapper[4932]: I1125 09:02:57.615888 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:02:57 crc kubenswrapper[4932]: I1125 09:02:57.635467 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-glq9k" podStartSLOduration=2.635425371 podStartE2EDuration="2.635425371s" podCreationTimestamp="2025-11-25 09:02:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:02:57.633558513 +0000 UTC m=+837.759588096" watchObservedRunningTime="2025-11-25 09:02:57.635425371 +0000 UTC m=+837.761454934" Nov 25 09:02:58 crc kubenswrapper[4932]: I1125 09:02:58.617566 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7af73cde-771e-4ea0-b5c1-549cdee5181f" path="/var/lib/kubelet/pods/7af73cde-771e-4ea0-b5c1-549cdee5181f/volumes" Nov 25 09:02:58 crc kubenswrapper[4932]: I1125 
09:02:58.630514 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2b5bv" event={"ID":"97beb067-4906-47af-810e-e3ddfc2d5ba1","Type":"ContainerStarted","Data":"1a92dc46fce3e5f788b1817beff9832758ce2cd3daed1c2d9ba36ce617f83b0e"} Nov 25 09:02:58 crc kubenswrapper[4932]: I1125 09:02:58.630614 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-2b5bv" event={"ID":"97beb067-4906-47af-810e-e3ddfc2d5ba1","Type":"ContainerStarted","Data":"9bc502850b71784d4c6fc01f0d63c2f21e9cd6882fafd7ae79c802e6991e8a02"} Nov 25 09:02:58 crc kubenswrapper[4932]: I1125 09:02:58.630778 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-2b5bv" Nov 25 09:02:58 crc kubenswrapper[4932]: I1125 09:02:58.648494 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-2b5bv" podStartSLOduration=3.64846595 podStartE2EDuration="3.64846595s" podCreationTimestamp="2025-11-25 09:02:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:02:58.648167602 +0000 UTC m=+838.774197165" watchObservedRunningTime="2025-11-25 09:02:58.64846595 +0000 UTC m=+838.774495513" Nov 25 09:03:05 crc kubenswrapper[4932]: I1125 09:03:05.672675 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" event={"ID":"7bd05772-0291-41f9-9a20-9eee4249c7a9","Type":"ContainerStarted","Data":"b6e7490c71fe16dc46370d6f8ac65b1636f04c8fc5b635a95876472dc80d14df"} Nov 25 09:03:05 crc kubenswrapper[4932]: I1125 09:03:05.673332 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" Nov 25 09:03:05 crc kubenswrapper[4932]: I1125 09:03:05.675931 4932 generic.go:334] "Generic (PLEG): container finished" podID="90887994-5f04-4a8a-abd7-6e6e6d1240f4" containerID="eb7755f81dc8bc1474256523ca75830f7d3055f38ca7d2b009bed69569f26e67" exitCode=0 Nov 25 09:03:05 crc kubenswrapper[4932]: I1125 09:03:05.675992 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2vpc5" event={"ID":"90887994-5f04-4a8a-abd7-6e6e6d1240f4","Type":"ContainerDied","Data":"eb7755f81dc8bc1474256523ca75830f7d3055f38ca7d2b009bed69569f26e67"} Nov 25 09:03:05 crc kubenswrapper[4932]: I1125 09:03:05.712717 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb" podStartSLOduration=2.314729101 podStartE2EDuration="10.712692376s" podCreationTimestamp="2025-11-25 09:02:55 +0000 UTC" firstStartedPulling="2025-11-25 09:02:56.97693716 +0000 UTC m=+837.102966723" lastFinishedPulling="2025-11-25 09:03:05.374900425 +0000 UTC m=+845.500929998" observedRunningTime="2025-11-25 09:03:05.691416566 +0000 UTC m=+845.817446139" watchObservedRunningTime="2025-11-25 09:03:05.712692376 +0000 UTC m=+845.838721939" Nov 25 09:03:06 crc kubenswrapper[4932]: I1125 09:03:06.066771 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-glq9k" Nov 25 09:03:06 crc kubenswrapper[4932]: I1125 09:03:06.684491 4932 generic.go:334] "Generic (PLEG): container finished" podID="90887994-5f04-4a8a-abd7-6e6e6d1240f4" containerID="ff716da1299a868fe3664e3a1fdd76b973d82ad2002876469111e4c5cd2b01cf" exitCode=0 Nov 25 09:03:06 crc kubenswrapper[4932]: I1125 09:03:06.685495 4932 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="metallb-system/frr-k8s-2vpc5" event={"ID":"90887994-5f04-4a8a-abd7-6e6e6d1240f4","Type":"ContainerDied","Data":"ff716da1299a868fe3664e3a1fdd76b973d82ad2002876469111e4c5cd2b01cf"} Nov 25 09:03:07 crc kubenswrapper[4932]: I1125 09:03:07.544951 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-2b5bv" Nov 25 09:03:07 crc kubenswrapper[4932]: I1125 09:03:07.705804 4932 generic.go:334] "Generic (PLEG): container finished" podID="90887994-5f04-4a8a-abd7-6e6e6d1240f4" containerID="08c9f6b23b35f5ee2be9ee06403f0bd96d0c453d3241934f97fd10d75fe35635" exitCode=0 Nov 25 09:03:07 crc kubenswrapper[4932]: I1125 09:03:07.705909 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2vpc5" event={"ID":"90887994-5f04-4a8a-abd7-6e6e6d1240f4","Type":"ContainerDied","Data":"08c9f6b23b35f5ee2be9ee06403f0bd96d0c453d3241934f97fd10d75fe35635"} Nov 25 09:03:08 crc kubenswrapper[4932]: I1125 09:03:08.714416 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2vpc5" event={"ID":"90887994-5f04-4a8a-abd7-6e6e6d1240f4","Type":"ContainerStarted","Data":"fce89546edf8a69666900581a9fae5f3578bd783f54bb8c646234a3ad78238ff"} Nov 25 09:03:08 crc kubenswrapper[4932]: I1125 09:03:08.714808 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2vpc5" event={"ID":"90887994-5f04-4a8a-abd7-6e6e6d1240f4","Type":"ContainerStarted","Data":"29afb8363402f359558abd3982327a881d0aa3113bdbbdcbc6e75e50e5d08338"} Nov 25 09:03:08 crc kubenswrapper[4932]: I1125 09:03:08.714825 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2vpc5" event={"ID":"90887994-5f04-4a8a-abd7-6e6e6d1240f4","Type":"ContainerStarted","Data":"af7ef2ce27b37e889baa1be40bbd6b9c0ac1e925e1ed13aa37762bd9aabe678e"} Nov 25 09:03:08 crc kubenswrapper[4932]: I1125 09:03:08.714838 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2vpc5" event={"ID":"90887994-5f04-4a8a-abd7-6e6e6d1240f4","Type":"ContainerStarted","Data":"860e9c30b0c717353cbf976b65a87798d3ff0782883a36f711ae3e3fce2a913b"} Nov 25 09:03:08 crc kubenswrapper[4932]: I1125 09:03:08.934448 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"] Nov 25 09:03:08 crc kubenswrapper[4932]: E1125 09:03:08.934744 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7af73cde-771e-4ea0-b5c1-549cdee5181f" containerName="extract-content" Nov 25 09:03:08 crc kubenswrapper[4932]: I1125 09:03:08.934765 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7af73cde-771e-4ea0-b5c1-549cdee5181f" containerName="extract-content" Nov 25 09:03:08 crc kubenswrapper[4932]: E1125 09:03:08.934787 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7af73cde-771e-4ea0-b5c1-549cdee5181f" containerName="registry-server" Nov 25 09:03:08 crc kubenswrapper[4932]: I1125 09:03:08.934795 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7af73cde-771e-4ea0-b5c1-549cdee5181f" containerName="registry-server" Nov 25 09:03:08 crc kubenswrapper[4932]: E1125 09:03:08.934806 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7af73cde-771e-4ea0-b5c1-549cdee5181f" containerName="extract-utilities" Nov 25 09:03:08 crc kubenswrapper[4932]: I1125 09:03:08.934816 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7af73cde-771e-4ea0-b5c1-549cdee5181f" containerName="extract-utilities" Nov 25 
Nov 25 09:03:08 crc kubenswrapper[4932]: I1125 09:03:08.934943 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="7af73cde-771e-4ea0-b5c1-549cdee5181f" containerName="registry-server"
Nov 25 09:03:08 crc kubenswrapper[4932]: I1125 09:03:08.935942 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:08 crc kubenswrapper[4932]: I1125 09:03:08.938418 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 25 09:03:08 crc kubenswrapper[4932]: I1125 09:03:08.944366 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"]
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.064869 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b94da386-38ab-4688-9c75-9dd7d781d901-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc\" (UID: \"b94da386-38ab-4688-9c75-9dd7d781d901\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.064918 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqwxl\" (UniqueName: \"kubernetes.io/projected/b94da386-38ab-4688-9c75-9dd7d781d901-kube-api-access-nqwxl\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc\" (UID: \"b94da386-38ab-4688-9c75-9dd7d781d901\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.064961 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b94da386-38ab-4688-9c75-9dd7d781d901-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc\" (UID: \"b94da386-38ab-4688-9c75-9dd7d781d901\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.166233 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b94da386-38ab-4688-9c75-9dd7d781d901-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc\" (UID: \"b94da386-38ab-4688-9c75-9dd7d781d901\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.166339 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b94da386-38ab-4688-9c75-9dd7d781d901-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc\" (UID: \"b94da386-38ab-4688-9c75-9dd7d781d901\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.166360 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqwxl\" (UniqueName: \"kubernetes.io/projected/b94da386-38ab-4688-9c75-9dd7d781d901-kube-api-access-nqwxl\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc\" (UID: \"b94da386-38ab-4688-9c75-9dd7d781d901\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.166977 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b94da386-38ab-4688-9c75-9dd7d781d901-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc\" (UID: \"b94da386-38ab-4688-9c75-9dd7d781d901\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.167053 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b94da386-38ab-4688-9c75-9dd7d781d901-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc\" (UID: \"b94da386-38ab-4688-9c75-9dd7d781d901\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.185555 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqwxl\" (UniqueName: \"kubernetes.io/projected/b94da386-38ab-4688-9c75-9dd7d781d901-kube-api-access-nqwxl\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc\" (UID: \"b94da386-38ab-4688-9c75-9dd7d781d901\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.260415 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.662485 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"]
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.724494 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2vpc5" event={"ID":"90887994-5f04-4a8a-abd7-6e6e6d1240f4","Type":"ContainerStarted","Data":"f28b2a0c07200174038b8728d7b3153a519244546538b0803527307cbd3c5e64"}
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.724536 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-2vpc5" event={"ID":"90887994-5f04-4a8a-abd7-6e6e6d1240f4","Type":"ContainerStarted","Data":"f2a660f59a63dd72fcde7a8a1cfd15112f800b0dd6804cba8e9a16b4060472fa"}
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.724636 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-2vpc5"
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.725502 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc" event={"ID":"b94da386-38ab-4688-9c75-9dd7d781d901","Type":"ContainerStarted","Data":"c25ea8e2d991146dab46821c8a885bdfb03aaf31c278f368e0adb576ad3ad1ec"}
Nov 25 09:03:09 crc kubenswrapper[4932]: I1125 09:03:09.745831 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-2vpc5" podStartSLOduration=6.129108177 podStartE2EDuration="14.745815666s" podCreationTimestamp="2025-11-25 09:02:55 +0000 UTC" firstStartedPulling="2025-11-25 09:02:56.735351172 +0000 UTC m=+836.861380735" lastFinishedPulling="2025-11-25 09:03:05.352058661 +0000 UTC m=+845.478088224" observedRunningTime="2025-11-25 09:03:09.745180278 +0000 UTC m=+849.871209841" watchObservedRunningTime="2025-11-25 09:03:09.745815666 +0000 UTC m=+849.871845229"
Nov 25 09:03:10 crc kubenswrapper[4932]: I1125 09:03:10.735263 4932 generic.go:334] "Generic (PLEG): container finished" podID="b94da386-38ab-4688-9c75-9dd7d781d901" containerID="b32265790b50e654daa22477b43102a0788638f868429c4a2ad9eadb6281c9dd" exitCode=0
Nov 25 09:03:10 crc kubenswrapper[4932]: I1125 09:03:10.735320 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc" event={"ID":"b94da386-38ab-4688-9c75-9dd7d781d901","Type":"ContainerDied","Data":"b32265790b50e654daa22477b43102a0788638f868429c4a2ad9eadb6281c9dd"}
Nov 25 09:03:11 crc kubenswrapper[4932]: I1125 09:03:11.560853 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-2vpc5"
Nov 25 09:03:11 crc kubenswrapper[4932]: I1125 09:03:11.611404 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-2vpc5"
Nov 25 09:03:15 crc kubenswrapper[4932]: I1125 09:03:15.765622 4932 generic.go:334] "Generic (PLEG): container finished" podID="b94da386-38ab-4688-9c75-9dd7d781d901" containerID="4d45f6f098bb0719d4b3b867d2b223c03865fdaabbc95489f02bcd022a3be678" exitCode=0
Nov 25 09:03:15 crc kubenswrapper[4932]: I1125 09:03:15.765665 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc" event={"ID":"b94da386-38ab-4688-9c75-9dd7d781d901","Type":"ContainerDied","Data":"4d45f6f098bb0719d4b3b867d2b223c03865fdaabbc95489f02bcd022a3be678"}
Nov 25 09:03:16 crc kubenswrapper[4932]: I1125 09:03:16.540127 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-zqpkb"
Nov 25 09:03:16 crc kubenswrapper[4932]: I1125 09:03:16.776602 4932 generic.go:334] "Generic (PLEG): container finished" podID="b94da386-38ab-4688-9c75-9dd7d781d901" containerID="5abe16d6830b392df52ec50bbbf24ab6d7a4a129f07c96cf75b7435e836ff0d6" exitCode=0
Nov 25 09:03:16 crc kubenswrapper[4932]: I1125 09:03:16.776654 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc" event={"ID":"b94da386-38ab-4688-9c75-9dd7d781d901","Type":"ContainerDied","Data":"5abe16d6830b392df52ec50bbbf24ab6d7a4a129f07c96cf75b7435e836ff0d6"}
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.107837 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.218823 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqwxl\" (UniqueName: \"kubernetes.io/projected/b94da386-38ab-4688-9c75-9dd7d781d901-kube-api-access-nqwxl\") pod \"b94da386-38ab-4688-9c75-9dd7d781d901\" (UID: \"b94da386-38ab-4688-9c75-9dd7d781d901\") "
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.218890 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b94da386-38ab-4688-9c75-9dd7d781d901-bundle\") pod \"b94da386-38ab-4688-9c75-9dd7d781d901\" (UID: \"b94da386-38ab-4688-9c75-9dd7d781d901\") "
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.218919 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b94da386-38ab-4688-9c75-9dd7d781d901-util\") pod \"b94da386-38ab-4688-9c75-9dd7d781d901\" (UID: \"b94da386-38ab-4688-9c75-9dd7d781d901\") "
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.220290 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b94da386-38ab-4688-9c75-9dd7d781d901-bundle" (OuterVolumeSpecName: "bundle") pod "b94da386-38ab-4688-9c75-9dd7d781d901" (UID: "b94da386-38ab-4688-9c75-9dd7d781d901"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.224850 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b94da386-38ab-4688-9c75-9dd7d781d901-kube-api-access-nqwxl" (OuterVolumeSpecName: "kube-api-access-nqwxl") pod "b94da386-38ab-4688-9c75-9dd7d781d901" (UID: "b94da386-38ab-4688-9c75-9dd7d781d901"). InnerVolumeSpecName "kube-api-access-nqwxl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.229060 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b94da386-38ab-4688-9c75-9dd7d781d901-util" (OuterVolumeSpecName: "util") pod "b94da386-38ab-4688-9c75-9dd7d781d901" (UID: "b94da386-38ab-4688-9c75-9dd7d781d901"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.320260 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqwxl\" (UniqueName: \"kubernetes.io/projected/b94da386-38ab-4688-9c75-9dd7d781d901-kube-api-access-nqwxl\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.320306 4932 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b94da386-38ab-4688-9c75-9dd7d781d901-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.320334 4932 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b94da386-38ab-4688-9c75-9dd7d781d901-util\") on node \"crc\" DevicePath \"\""
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.791077 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc" event={"ID":"b94da386-38ab-4688-9c75-9dd7d781d901","Type":"ContainerDied","Data":"c25ea8e2d991146dab46821c8a885bdfb03aaf31c278f368e0adb576ad3ad1ec"}
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.791117 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c25ea8e2d991146dab46821c8a885bdfb03aaf31c278f368e0adb576ad3ad1ec"
Nov 25 09:03:18 crc kubenswrapper[4932]: I1125 09:03:18.791138 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931a9knhc"
Nov 25 09:03:26 crc kubenswrapper[4932]: I1125 09:03:26.563347 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-2vpc5"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.149462 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k"]
Nov 25 09:03:27 crc kubenswrapper[4932]: E1125 09:03:27.150101 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b94da386-38ab-4688-9c75-9dd7d781d901" containerName="util"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.150117 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b94da386-38ab-4688-9c75-9dd7d781d901" containerName="util"
Nov 25 09:03:27 crc kubenswrapper[4932]: E1125 09:03:27.150130 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b94da386-38ab-4688-9c75-9dd7d781d901" containerName="pull"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.150138 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b94da386-38ab-4688-9c75-9dd7d781d901" containerName="pull"
Nov 25 09:03:27 crc kubenswrapper[4932]: E1125 09:03:27.150151 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b94da386-38ab-4688-9c75-9dd7d781d901" containerName="extract"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.150159 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b94da386-38ab-4688-9c75-9dd7d781d901" containerName="extract"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.150293 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b94da386-38ab-4688-9c75-9dd7d781d901" containerName="extract"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.150731 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.153712 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.153782 4932 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-wttcj"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.154683 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.210011 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k"]
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.266972 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e225009e-5386-4f42-9b05-6ea7f97d9008-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-qxp9k\" (UID: \"e225009e-5386-4f42-9b05-6ea7f97d9008\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.267112 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvz62\" (UniqueName: \"kubernetes.io/projected/e225009e-5386-4f42-9b05-6ea7f97d9008-kube-api-access-hvz62\") pod \"cert-manager-operator-controller-manager-64cf6dff88-qxp9k\" (UID: \"e225009e-5386-4f42-9b05-6ea7f97d9008\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.369923 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvz62\" (UniqueName: \"kubernetes.io/projected/e225009e-5386-4f42-9b05-6ea7f97d9008-kube-api-access-hvz62\") pod \"cert-manager-operator-controller-manager-64cf6dff88-qxp9k\" (UID: \"e225009e-5386-4f42-9b05-6ea7f97d9008\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.369985 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e225009e-5386-4f42-9b05-6ea7f97d9008-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-qxp9k\" (UID: \"e225009e-5386-4f42-9b05-6ea7f97d9008\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.370480 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/e225009e-5386-4f42-9b05-6ea7f97d9008-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-qxp9k\" (UID: \"e225009e-5386-4f42-9b05-6ea7f97d9008\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.397605 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvz62\" (UniqueName: \"kubernetes.io/projected/e225009e-5386-4f42-9b05-6ea7f97d9008-kube-api-access-hvz62\") pod \"cert-manager-operator-controller-manager-64cf6dff88-qxp9k\" (UID: \"e225009e-5386-4f42-9b05-6ea7f97d9008\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k"
Nov 25 09:03:27 crc kubenswrapper[4932]: I1125 09:03:27.468705 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k"
Nov 25 09:03:28 crc kubenswrapper[4932]: I1125 09:03:28.032016 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k"]
Nov 25 09:03:28 crc kubenswrapper[4932]: I1125 09:03:28.851754 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k" event={"ID":"e225009e-5386-4f42-9b05-6ea7f97d9008","Type":"ContainerStarted","Data":"b3a6b1ba029108fad1cabe73c4863ac3eef723fbf8adc1873bc18ba0d20bc35c"}
Nov 25 09:03:35 crc kubenswrapper[4932]: I1125 09:03:35.893992 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k" event={"ID":"e225009e-5386-4f42-9b05-6ea7f97d9008","Type":"ContainerStarted","Data":"49d1a956b9f024e49a7034fd8f8d9830471871a4bfb41a639cd5af7741aa1415"}
Nov 25 09:03:35 crc kubenswrapper[4932]: I1125 09:03:35.912878 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-qxp9k" podStartSLOduration=1.781905886 podStartE2EDuration="8.912858417s" podCreationTimestamp="2025-11-25 09:03:27 +0000 UTC" firstStartedPulling="2025-11-25 09:03:28.041353812 +0000 UTC m=+868.167383375" lastFinishedPulling="2025-11-25 09:03:35.172306343 +0000 UTC m=+875.298335906" observedRunningTime="2025-11-25 09:03:35.909225875 +0000 UTC m=+876.035255448" watchObservedRunningTime="2025-11-25 09:03:35.912858417 +0000 UTC m=+876.038887980"
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.444401 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"]
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.445930 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.449166 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.449335 4932 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-spbrq"
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.449450 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.456656 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"]
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.511446 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd638a63-b23e-4787-bf68-246a68ec0e60-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-s9m4t\" (UID: \"fd638a63-b23e-4787-bf68-246a68ec0e60\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.511521 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brl2r\" (UniqueName: \"kubernetes.io/projected/fd638a63-b23e-4787-bf68-246a68ec0e60-kube-api-access-brl2r\") pod \"cert-manager-webhook-f4fb5df64-s9m4t\" (UID: \"fd638a63-b23e-4787-bf68-246a68ec0e60\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.612237 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd638a63-b23e-4787-bf68-246a68ec0e60-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-s9m4t\" (UID: \"fd638a63-b23e-4787-bf68-246a68ec0e60\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.612305 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brl2r\" (UniqueName: \"kubernetes.io/projected/fd638a63-b23e-4787-bf68-246a68ec0e60-kube-api-access-brl2r\") pod \"cert-manager-webhook-f4fb5df64-s9m4t\" (UID: \"fd638a63-b23e-4787-bf68-246a68ec0e60\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.631698 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd638a63-b23e-4787-bf68-246a68ec0e60-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-s9m4t\" (UID: \"fd638a63-b23e-4787-bf68-246a68ec0e60\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.634251 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brl2r\" (UniqueName: \"kubernetes.io/projected/fd638a63-b23e-4787-bf68-246a68ec0e60-kube-api-access-brl2r\") pod \"cert-manager-webhook-f4fb5df64-s9m4t\" (UID: \"fd638a63-b23e-4787-bf68-246a68ec0e60\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"
Nov 25 09:03:39 crc kubenswrapper[4932]: I1125 09:03:39.763486 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.175766 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"]
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.667785 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j"]
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.668546 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j"
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.672025 4932 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-7xdst"
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.718722 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j"]
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.731933 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tstpl\" (UniqueName: \"kubernetes.io/projected/6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93-kube-api-access-tstpl\") pod \"cert-manager-cainjector-855d9ccff4-hdn5j\" (UID: \"6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j"
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.732020 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-hdn5j\" (UID: \"6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j"
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.833170 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tstpl\" (UniqueName: \"kubernetes.io/projected/6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93-kube-api-access-tstpl\") pod \"cert-manager-cainjector-855d9ccff4-hdn5j\" (UID: \"6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j"
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.833386 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-hdn5j\" (UID: \"6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j"
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.852042 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-hdn5j\" (UID: \"6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j"
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.865067 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tstpl\" (UniqueName: \"kubernetes.io/projected/6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93-kube-api-access-tstpl\") pod \"cert-manager-cainjector-855d9ccff4-hdn5j\" (UID: \"6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j"
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.928348 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t" event={"ID":"fd638a63-b23e-4787-bf68-246a68ec0e60","Type":"ContainerStarted","Data":"71d553c704e80f96f5a8a06cdc20c13562a6b5f6a641bdd3e6c84c3faa9e8e24"}
Nov 25 09:03:40 crc kubenswrapper[4932]: I1125 09:03:40.987545 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j"
Nov 25 09:03:41 crc kubenswrapper[4932]: I1125 09:03:41.455336 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j"]
Nov 25 09:03:41 crc kubenswrapper[4932]: W1125 09:03:41.466838 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f9b2a8f_5fe6_4c38_ab9b_ef9e30db8d93.slice/crio-5ad5cbfbe3de3708f1105a767b7fb9c870377abf330920cc67a342260129ca83 WatchSource:0}: Error finding container 5ad5cbfbe3de3708f1105a767b7fb9c870377abf330920cc67a342260129ca83: Status 404 returned error can't find the container with id 5ad5cbfbe3de3708f1105a767b7fb9c870377abf330920cc67a342260129ca83
Nov 25 09:03:41 crc kubenswrapper[4932]: I1125 09:03:41.950413 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j" event={"ID":"6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93","Type":"ContainerStarted","Data":"5ad5cbfbe3de3708f1105a767b7fb9c870377abf330920cc67a342260129ca83"}
Nov 25 09:03:47 crc kubenswrapper[4932]: I1125 09:03:47.991177 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t" event={"ID":"fd638a63-b23e-4787-bf68-246a68ec0e60","Type":"ContainerStarted","Data":"d47fe461c728e612b2e0f9aa288b2e9efc779d241eb547de84e92d0b2f2c7ec4"}
Nov 25 09:03:47 crc kubenswrapper[4932]: I1125 09:03:47.991983 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"
Nov 25 09:03:47 crc kubenswrapper[4932]: I1125 09:03:47.993268 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j" event={"ID":"6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93","Type":"ContainerStarted","Data":"b4c4559bceccf81fdaab6f6b7cfb015be19956e6d2f5f89073fd72d3f0b4e368"}
Nov 25 09:03:48 crc kubenswrapper[4932]: I1125 09:03:48.008298 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t" podStartSLOduration=1.575602642 podStartE2EDuration="9.008278736s" podCreationTimestamp="2025-11-25 09:03:39 +0000 UTC" firstStartedPulling="2025-11-25 09:03:40.186817793 +0000 UTC m=+880.312847356" lastFinishedPulling="2025-11-25 09:03:47.619493887 +0000 UTC m=+887.745523450" observedRunningTime="2025-11-25 09:03:48.005461697 +0000 UTC m=+888.131491270" watchObservedRunningTime="2025-11-25 09:03:48.008278736 +0000 UTC m=+888.134308299"
Nov 25 09:03:48 crc kubenswrapper[4932]: I1125 09:03:48.024565 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j" podStartSLOduration=1.839361007 podStartE2EDuration="8.024548045s" podCreationTimestamp="2025-11-25 09:03:40 +0000 UTC" firstStartedPulling="2025-11-25 09:03:41.468565466 +0000 UTC m=+881.594595039" lastFinishedPulling="2025-11-25 09:03:47.653752524 +0000 UTC m=+887.779782077" observedRunningTime="2025-11-25 09:03:48.019527464 +0000 UTC m=+888.145557037" watchObservedRunningTime="2025-11-25 09:03:48.024548045 +0000 UTC m=+888.150577608"
Nov 25 09:03:54 crc kubenswrapper[4932]: I1125 09:03:54.767735 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-s9m4t"
Nov 25 09:03:57 crc kubenswrapper[4932]: I1125 09:03:57.598129 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-gv92q"]
Nov 25 09:03:57 crc kubenswrapper[4932]: I1125 09:03:57.599804 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-gv92q"
Nov 25 09:03:57 crc kubenswrapper[4932]: I1125 09:03:57.603523 4932 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-skjhc"
Nov 25 09:03:57 crc kubenswrapper[4932]: I1125 09:03:57.619055 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-gv92q"]
Nov 25 09:03:57 crc kubenswrapper[4932]: I1125 09:03:57.709213 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z976f\" (UniqueName: \"kubernetes.io/projected/84780d5c-5474-414f-8114-c501bc24f25a-kube-api-access-z976f\") pod \"cert-manager-86cb77c54b-gv92q\" (UID: \"84780d5c-5474-414f-8114-c501bc24f25a\") " pod="cert-manager/cert-manager-86cb77c54b-gv92q"
Nov 25 09:03:57 crc kubenswrapper[4932]: I1125 09:03:57.709290 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/84780d5c-5474-414f-8114-c501bc24f25a-bound-sa-token\") pod \"cert-manager-86cb77c54b-gv92q\" (UID: \"84780d5c-5474-414f-8114-c501bc24f25a\") " pod="cert-manager/cert-manager-86cb77c54b-gv92q"
Nov 25 09:03:57 crc kubenswrapper[4932]: I1125 09:03:57.810335 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/84780d5c-5474-414f-8114-c501bc24f25a-bound-sa-token\") pod \"cert-manager-86cb77c54b-gv92q\" (UID: \"84780d5c-5474-414f-8114-c501bc24f25a\") " pod="cert-manager/cert-manager-86cb77c54b-gv92q"
Nov 25 09:03:57 crc kubenswrapper[4932]: I1125 09:03:57.810523 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z976f\" (UniqueName: \"kubernetes.io/projected/84780d5c-5474-414f-8114-c501bc24f25a-kube-api-access-z976f\") pod \"cert-manager-86cb77c54b-gv92q\" (UID: \"84780d5c-5474-414f-8114-c501bc24f25a\") " pod="cert-manager/cert-manager-86cb77c54b-gv92q"
Nov 25 09:03:57 crc kubenswrapper[4932]: I1125 09:03:57.831821 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/84780d5c-5474-414f-8114-c501bc24f25a-bound-sa-token\") pod \"cert-manager-86cb77c54b-gv92q\" (UID: \"84780d5c-5474-414f-8114-c501bc24f25a\") " pod="cert-manager/cert-manager-86cb77c54b-gv92q"
Nov 25 09:03:57 crc kubenswrapper[4932]: I1125 09:03:57.838519 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z976f\" (UniqueName: \"kubernetes.io/projected/84780d5c-5474-414f-8114-c501bc24f25a-kube-api-access-z976f\") pod \"cert-manager-86cb77c54b-gv92q\" (UID: \"84780d5c-5474-414f-8114-c501bc24f25a\") " pod="cert-manager/cert-manager-86cb77c54b-gv92q"
Nov 25 09:03:57 crc kubenswrapper[4932]: I1125 09:03:57.919494 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-gv92q"
Nov 25 09:03:58 crc kubenswrapper[4932]: I1125 09:03:58.152644 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-gv92q"]
Nov 25 09:03:59 crc kubenswrapper[4932]: I1125 09:03:59.068033 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-gv92q" event={"ID":"84780d5c-5474-414f-8114-c501bc24f25a","Type":"ContainerStarted","Data":"9f40cb83e271d76d23bcd9b835091248f5fed01b189cdce746f34b54415351f3"}
Nov 25 09:03:59 crc kubenswrapper[4932]: I1125 09:03:59.068415 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-gv92q" event={"ID":"84780d5c-5474-414f-8114-c501bc24f25a","Type":"ContainerStarted","Data":"638ac35d60deea98054bcc016dcffd99f3c2265207dbb3837e083c0be6730604"}
Nov 25 09:03:59 crc kubenswrapper[4932]: I1125 09:03:59.082844 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-gv92q" podStartSLOduration=2.082828214 podStartE2EDuration="2.082828214s" podCreationTimestamp="2025-11-25 09:03:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:03:59.079306244 +0000 UTC m=+899.205335807" watchObservedRunningTime="2025-11-25 09:03:59.082828214 +0000 UTC m=+899.208857777"
Nov 25 09:04:07 crc kubenswrapper[4932]: I1125 09:04:07.181542 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:04:07 crc kubenswrapper[4932]: I1125 09:04:07.182128 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:04:08 crc kubenswrapper[4932]: I1125 09:04:08.027039 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-8twpc"]
Nov 25 09:04:08 crc kubenswrapper[4932]: I1125 09:04:08.028588 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8twpc"
Nov 25 09:04:08 crc kubenswrapper[4932]: I1125 09:04:08.030747 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Nov 25 09:04:08 crc kubenswrapper[4932]: I1125 09:04:08.031011 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-9kmsl"
Nov 25 09:04:08 crc kubenswrapper[4932]: I1125 09:04:08.031134 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Nov 25 09:04:08 crc kubenswrapper[4932]: I1125 09:04:08.086606 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8twpc"]
Nov 25 09:04:08 crc kubenswrapper[4932]: I1125 09:04:08.149991 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-th4xx\" (UniqueName: \"kubernetes.io/projected/34cb4d07-ff5e-4905-82b2-f7a2efbda883-kube-api-access-th4xx\") pod \"openstack-operator-index-8twpc\" (UID: \"34cb4d07-ff5e-4905-82b2-f7a2efbda883\") " pod="openstack-operators/openstack-operator-index-8twpc"
Nov 25 09:04:08 crc kubenswrapper[4932]: I1125 09:04:08.251551 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-th4xx\" (UniqueName: \"kubernetes.io/projected/34cb4d07-ff5e-4905-82b2-f7a2efbda883-kube-api-access-th4xx\") pod \"openstack-operator-index-8twpc\" (UID: \"34cb4d07-ff5e-4905-82b2-f7a2efbda883\") " pod="openstack-operators/openstack-operator-index-8twpc"
Nov 25 09:04:08 crc kubenswrapper[4932]: I1125 09:04:08.275094 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-th4xx\" (UniqueName: \"kubernetes.io/projected/34cb4d07-ff5e-4905-82b2-f7a2efbda883-kube-api-access-th4xx\") pod \"openstack-operator-index-8twpc\" (UID: \"34cb4d07-ff5e-4905-82b2-f7a2efbda883\") " pod="openstack-operators/openstack-operator-index-8twpc"
Nov 25 09:04:08 crc kubenswrapper[4932]: I1125 09:04:08.347512 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8twpc"
Nov 25 09:04:08 crc kubenswrapper[4932]: I1125 09:04:08.747598 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8twpc"]
Nov 25 09:04:09 crc kubenswrapper[4932]: I1125 09:04:09.136462 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8twpc" event={"ID":"34cb4d07-ff5e-4905-82b2-f7a2efbda883","Type":"ContainerStarted","Data":"f3ee0f59b3c72224f92540015cb8c5a38b0fc4141a63e0552f19811b426504f5"}
Nov 25 09:04:10 crc kubenswrapper[4932]: I1125 09:04:10.143332 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8twpc" event={"ID":"34cb4d07-ff5e-4905-82b2-f7a2efbda883","Type":"ContainerStarted","Data":"81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c"}
Nov 25 09:04:10 crc kubenswrapper[4932]: I1125 09:04:10.167986 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-8twpc" podStartSLOduration=1.364240009 podStartE2EDuration="2.167964915s" podCreationTimestamp="2025-11-25 09:04:08 +0000 UTC" firstStartedPulling="2025-11-25 09:04:08.749665199 +0000 UTC m=+908.875694762" lastFinishedPulling="2025-11-25 09:04:09.553390105 +0000 UTC m=+909.679419668" observedRunningTime="2025-11-25 09:04:10.165551937 +0000 UTC m=+910.291581510" watchObservedRunningTime="2025-11-25 09:04:10.167964915 +0000 UTC m=+910.293994488"
Nov 25 09:04:11 crc kubenswrapper[4932]: I1125 09:04:11.397476 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-8twpc"]
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.002573 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-9p2b8"]
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.003370 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-9p2b8"
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.014642 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-9p2b8"]
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.103300 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmqsw\" (UniqueName: \"kubernetes.io/projected/386fbf25-e74f-4de6-af9f-af3744d2b7f1-kube-api-access-jmqsw\") pod \"openstack-operator-index-9p2b8\" (UID: \"386fbf25-e74f-4de6-af9f-af3744d2b7f1\") " pod="openstack-operators/openstack-operator-index-9p2b8"
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.156154 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-8twpc" podUID="34cb4d07-ff5e-4905-82b2-f7a2efbda883" containerName="registry-server" containerID="cri-o://81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c" gracePeriod=2
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.204709 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmqsw\" (UniqueName: \"kubernetes.io/projected/386fbf25-e74f-4de6-af9f-af3744d2b7f1-kube-api-access-jmqsw\") pod \"openstack-operator-index-9p2b8\" (UID: \"386fbf25-e74f-4de6-af9f-af3744d2b7f1\") " pod="openstack-operators/openstack-operator-index-9p2b8"
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.227025 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmqsw\" (UniqueName: \"kubernetes.io/projected/386fbf25-e74f-4de6-af9f-af3744d2b7f1-kube-api-access-jmqsw\") pod \"openstack-operator-index-9p2b8\" (UID: \"386fbf25-e74f-4de6-af9f-af3744d2b7f1\") " pod="openstack-operators/openstack-operator-index-9p2b8"
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.339323 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-9p2b8"
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.540137 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8twpc"
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.609744 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-th4xx\" (UniqueName: \"kubernetes.io/projected/34cb4d07-ff5e-4905-82b2-f7a2efbda883-kube-api-access-th4xx\") pod \"34cb4d07-ff5e-4905-82b2-f7a2efbda883\" (UID: \"34cb4d07-ff5e-4905-82b2-f7a2efbda883\") "
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.614148 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34cb4d07-ff5e-4905-82b2-f7a2efbda883-kube-api-access-th4xx" (OuterVolumeSpecName: "kube-api-access-th4xx") pod "34cb4d07-ff5e-4905-82b2-f7a2efbda883" (UID: "34cb4d07-ff5e-4905-82b2-f7a2efbda883"). InnerVolumeSpecName "kube-api-access-th4xx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.712045 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-th4xx\" (UniqueName: \"kubernetes.io/projected/34cb4d07-ff5e-4905-82b2-f7a2efbda883-kube-api-access-th4xx\") on node \"crc\" DevicePath \"\""
Nov 25 09:04:12 crc kubenswrapper[4932]: I1125 09:04:12.770825 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-9p2b8"]
Nov 25 09:04:13 crc kubenswrapper[4932]: I1125 09:04:13.164916 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9p2b8" event={"ID":"386fbf25-e74f-4de6-af9f-af3744d2b7f1","Type":"ContainerStarted","Data":"bba9879848a8f49e8c97dd9d097963d257f1b10a72a4a888057060e92582c990"}
Nov 25 09:04:13 crc kubenswrapper[4932]: I1125 09:04:13.166317 4932 generic.go:334] "Generic (PLEG): container finished" podID="34cb4d07-ff5e-4905-82b2-f7a2efbda883" containerID="81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c" exitCode=0
Nov 25 09:04:13 crc kubenswrapper[4932]: I1125 09:04:13.166350 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8twpc" event={"ID":"34cb4d07-ff5e-4905-82b2-f7a2efbda883","Type":"ContainerDied","Data":"81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c"}
Nov 25 09:04:13 crc kubenswrapper[4932]: I1125 09:04:13.166370 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8twpc" event={"ID":"34cb4d07-ff5e-4905-82b2-f7a2efbda883","Type":"ContainerDied","Data":"f3ee0f59b3c72224f92540015cb8c5a38b0fc4141a63e0552f19811b426504f5"}
Nov 25 09:04:13 crc kubenswrapper[4932]: I1125 09:04:13.166392 4932 scope.go:117] "RemoveContainer" containerID="81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c"
Nov 25 09:04:13 crc kubenswrapper[4932]: I1125 09:04:13.166479 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8twpc"
Nov 25 09:04:13 crc kubenswrapper[4932]: I1125 09:04:13.186848 4932 scope.go:117] "RemoveContainer" containerID="81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c"
Nov 25 09:04:13 crc kubenswrapper[4932]: E1125 09:04:13.188052 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c\": container with ID starting with 81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c not found: ID does not exist" containerID="81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c"
Nov 25 09:04:13 crc kubenswrapper[4932]: I1125 09:04:13.188108 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c"} err="failed to get container status \"81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c\": rpc error: code = NotFound desc = could not find container \"81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c\": container with ID starting with 81a083a0581755d3e93f74a100f5dc171491640bce173e7d83387582760a294c not found: ID does not exist"
Nov 25 09:04:13 crc kubenswrapper[4932]: I1125 09:04:13.213322 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-8twpc"]
Nov 25 09:04:13 crc kubenswrapper[4932]: I1125 09:04:13.218315 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-8twpc"]
Nov 25 09:04:14 crc kubenswrapper[4932]: I1125 09:04:14.176065 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-9p2b8" event={"ID":"386fbf25-e74f-4de6-af9f-af3744d2b7f1","Type":"ContainerStarted","Data":"ec0f6e80b380799bc41a8bbf95202879d99a00dbfcf7ba4843b1140abdef714b"}
Nov 25 09:04:14 crc kubenswrapper[4932]: I1125 09:04:14.195735 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-9p2b8" podStartSLOduration=2.670527388 podStartE2EDuration="3.195713086s" podCreationTimestamp="2025-11-25 09:04:11 +0000 UTC" firstStartedPulling="2025-11-25 09:04:12.784983343 +0000 UTC m=+912.911012906" lastFinishedPulling="2025-11-25 09:04:13.310169021 +0000 UTC m=+913.436198604" observedRunningTime="2025-11-25 09:04:14.193639158 +0000 UTC m=+914.319668721" watchObservedRunningTime="2025-11-25 09:04:14.195713086 +0000 UTC m=+914.321742659"
Nov 25 09:04:14 crc kubenswrapper[4932]: I1125 09:04:14.614527 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34cb4d07-ff5e-4905-82b2-f7a2efbda883" path="/var/lib/kubelet/pods/34cb4d07-ff5e-4905-82b2-f7a2efbda883/volumes"
Nov 25 09:04:22 crc kubenswrapper[4932]: I1125 09:04:22.340368 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-9p2b8"
Nov 25 09:04:22 crc kubenswrapper[4932]: I1125 09:04:22.341365 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-9p2b8"
Nov 25 09:04:22 crc kubenswrapper[4932]: I1125 09:04:22.370670 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-9p2b8"
Nov 25 09:04:23 crc kubenswrapper[4932]: I1125 09:04:23.277468 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-9p2b8"
Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.040487 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp"]
Nov 25 09:04:28 crc kubenswrapper[4932]: E1125 09:04:28.042432 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34cb4d07-ff5e-4905-82b2-f7a2efbda883" containerName="registry-server"
Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.042536 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="34cb4d07-ff5e-4905-82b2-f7a2efbda883" containerName="registry-server"
Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.043422 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="34cb4d07-ff5e-4905-82b2-f7a2efbda883" containerName="registry-server"
Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.044465 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp"
Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.047469 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-jh2q7"
Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.054407 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp"]
Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.121236 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-util\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp"
Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.121644 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lmvx\" (UniqueName: \"kubernetes.io/projected/4e7cab60-e3b0-439f-97d0-64e1e568967e-kube-api-access-9lmvx\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp"
Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.121676 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-bundle\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp"
Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.223089 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lmvx\" (UniqueName: \"kubernetes.io/projected/4e7cab60-e3b0-439f-97d0-64e1e568967e-kube-api-access-9lmvx\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp"
Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.223152 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-bundle\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp"
"operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-bundle\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.223190 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-util\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.223887 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-util\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.224136 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-bundle\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.247530 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lmvx\" (UniqueName: \"kubernetes.io/projected/4e7cab60-e3b0-439f-97d0-64e1e568967e-kube-api-access-9lmvx\") pod \"bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.363739 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" Nov 25 09:04:28 crc kubenswrapper[4932]: I1125 09:04:28.542037 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp"] Nov 25 09:04:28 crc kubenswrapper[4932]: W1125 09:04:28.556396 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e7cab60_e3b0_439f_97d0_64e1e568967e.slice/crio-f5b8dd66655c9e48df42b4f0a1bb6cf4256378fcb04b1999bb72614fdb47f917 WatchSource:0}: Error finding container f5b8dd66655c9e48df42b4f0a1bb6cf4256378fcb04b1999bb72614fdb47f917: Status 404 returned error can't find the container with id f5b8dd66655c9e48df42b4f0a1bb6cf4256378fcb04b1999bb72614fdb47f917 Nov 25 09:04:29 crc kubenswrapper[4932]: I1125 09:04:29.278526 4932 generic.go:334] "Generic (PLEG): container finished" podID="4e7cab60-e3b0-439f-97d0-64e1e568967e" containerID="cc102c24fadce13d3330fe7d54f25f4827d13b7008adb7ad4bd4fe343a86d77d" exitCode=0 Nov 25 09:04:29 crc kubenswrapper[4932]: I1125 09:04:29.278585 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" event={"ID":"4e7cab60-e3b0-439f-97d0-64e1e568967e","Type":"ContainerDied","Data":"cc102c24fadce13d3330fe7d54f25f4827d13b7008adb7ad4bd4fe343a86d77d"} Nov 25 09:04:29 crc kubenswrapper[4932]: I1125 09:04:29.278850 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" event={"ID":"4e7cab60-e3b0-439f-97d0-64e1e568967e","Type":"ContainerStarted","Data":"f5b8dd66655c9e48df42b4f0a1bb6cf4256378fcb04b1999bb72614fdb47f917"} Nov 25 09:04:30 crc kubenswrapper[4932]: I1125 09:04:30.287025 4932 generic.go:334] "Generic (PLEG): container finished" podID="4e7cab60-e3b0-439f-97d0-64e1e568967e" containerID="b0e9efc7b1321e3621c4c5fc20c78b803f76bb33a366f403091ac10f562f47e8" exitCode=0 Nov 25 09:04:30 crc kubenswrapper[4932]: I1125 09:04:30.287088 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" event={"ID":"4e7cab60-e3b0-439f-97d0-64e1e568967e","Type":"ContainerDied","Data":"b0e9efc7b1321e3621c4c5fc20c78b803f76bb33a366f403091ac10f562f47e8"} Nov 25 09:04:31 crc kubenswrapper[4932]: I1125 09:04:31.294833 4932 generic.go:334] "Generic (PLEG): container finished" podID="4e7cab60-e3b0-439f-97d0-64e1e568967e" containerID="9b879ba4ca88ad909605550b104a77f9c2719034da34425c1ffca510149db8e0" exitCode=0 Nov 25 09:04:31 crc kubenswrapper[4932]: I1125 09:04:31.294908 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" event={"ID":"4e7cab60-e3b0-439f-97d0-64e1e568967e","Type":"ContainerDied","Data":"9b879ba4ca88ad909605550b104a77f9c2719034da34425c1ffca510149db8e0"} Nov 25 09:04:32 crc kubenswrapper[4932]: I1125 09:04:32.535392 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" Nov 25 09:04:32 crc kubenswrapper[4932]: I1125 09:04:32.680762 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-bundle\") pod \"4e7cab60-e3b0-439f-97d0-64e1e568967e\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " Nov 25 09:04:32 crc kubenswrapper[4932]: I1125 09:04:32.680842 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-util\") pod \"4e7cab60-e3b0-439f-97d0-64e1e568967e\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " Nov 25 09:04:32 crc kubenswrapper[4932]: I1125 09:04:32.680877 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9lmvx\" (UniqueName: \"kubernetes.io/projected/4e7cab60-e3b0-439f-97d0-64e1e568967e-kube-api-access-9lmvx\") pod \"4e7cab60-e3b0-439f-97d0-64e1e568967e\" (UID: \"4e7cab60-e3b0-439f-97d0-64e1e568967e\") " Nov 25 09:04:32 crc kubenswrapper[4932]: I1125 09:04:32.681625 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-bundle" (OuterVolumeSpecName: "bundle") pod "4e7cab60-e3b0-439f-97d0-64e1e568967e" (UID: "4e7cab60-e3b0-439f-97d0-64e1e568967e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:04:32 crc kubenswrapper[4932]: I1125 09:04:32.692001 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e7cab60-e3b0-439f-97d0-64e1e568967e-kube-api-access-9lmvx" (OuterVolumeSpecName: "kube-api-access-9lmvx") pod "4e7cab60-e3b0-439f-97d0-64e1e568967e" (UID: "4e7cab60-e3b0-439f-97d0-64e1e568967e"). InnerVolumeSpecName "kube-api-access-9lmvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:04:32 crc kubenswrapper[4932]: I1125 09:04:32.698230 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-util" (OuterVolumeSpecName: "util") pod "4e7cab60-e3b0-439f-97d0-64e1e568967e" (UID: "4e7cab60-e3b0-439f-97d0-64e1e568967e"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:04:32 crc kubenswrapper[4932]: I1125 09:04:32.782163 4932 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:04:32 crc kubenswrapper[4932]: I1125 09:04:32.782225 4932 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/4e7cab60-e3b0-439f-97d0-64e1e568967e-util\") on node \"crc\" DevicePath \"\"" Nov 25 09:04:32 crc kubenswrapper[4932]: I1125 09:04:32.782235 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9lmvx\" (UniqueName: \"kubernetes.io/projected/4e7cab60-e3b0-439f-97d0-64e1e568967e-kube-api-access-9lmvx\") on node \"crc\" DevicePath \"\"" Nov 25 09:04:33 crc kubenswrapper[4932]: I1125 09:04:33.309141 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" event={"ID":"4e7cab60-e3b0-439f-97d0-64e1e568967e","Type":"ContainerDied","Data":"f5b8dd66655c9e48df42b4f0a1bb6cf4256378fcb04b1999bb72614fdb47f917"} Nov 25 09:04:33 crc kubenswrapper[4932]: I1125 09:04:33.309207 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5b8dd66655c9e48df42b4f0a1bb6cf4256378fcb04b1999bb72614fdb47f917" Nov 25 09:04:33 crc kubenswrapper[4932]: I1125 09:04:33.309271 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/bbe0292a041351b2e91c74017e768208b36f144dd799fdf82c414fd15fkgrmp" Nov 25 09:04:37 crc kubenswrapper[4932]: I1125 09:04:37.181543 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:04:37 crc kubenswrapper[4932]: I1125 09:04:37.182019 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.203298 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr"] Nov 25 09:04:39 crc kubenswrapper[4932]: E1125 09:04:39.203920 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e7cab60-e3b0-439f-97d0-64e1e568967e" containerName="util" Nov 25 09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.203935 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e7cab60-e3b0-439f-97d0-64e1e568967e" containerName="util" Nov 25 09:04:39 crc kubenswrapper[4932]: E1125 09:04:39.203956 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e7cab60-e3b0-439f-97d0-64e1e568967e" containerName="pull" Nov 25 09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.203963 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e7cab60-e3b0-439f-97d0-64e1e568967e" containerName="pull" Nov 25 09:04:39 crc kubenswrapper[4932]: E1125 09:04:39.203976 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e7cab60-e3b0-439f-97d0-64e1e568967e" containerName="extract" Nov 25 
09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.203984 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e7cab60-e3b0-439f-97d0-64e1e568967e" containerName="extract" Nov 25 09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.204143 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e7cab60-e3b0-439f-97d0-64e1e568967e" containerName="extract" Nov 25 09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.204678 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" Nov 25 09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.206864 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-cqv7v" Nov 25 09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.243172 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr"] Nov 25 09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.268942 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbjkv\" (UniqueName: \"kubernetes.io/projected/58b17ce6-9e76-4007-ac84-d59d6c3c38d2-kube-api-access-sbjkv\") pod \"openstack-operator-controller-operator-7b567956b5-842jr\" (UID: \"58b17ce6-9e76-4007-ac84-d59d6c3c38d2\") " pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" Nov 25 09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.371111 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbjkv\" (UniqueName: \"kubernetes.io/projected/58b17ce6-9e76-4007-ac84-d59d6c3c38d2-kube-api-access-sbjkv\") pod \"openstack-operator-controller-operator-7b567956b5-842jr\" (UID: \"58b17ce6-9e76-4007-ac84-d59d6c3c38d2\") " pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" Nov 25 09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.410091 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbjkv\" (UniqueName: \"kubernetes.io/projected/58b17ce6-9e76-4007-ac84-d59d6c3c38d2-kube-api-access-sbjkv\") pod \"openstack-operator-controller-operator-7b567956b5-842jr\" (UID: \"58b17ce6-9e76-4007-ac84-d59d6c3c38d2\") " pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" Nov 25 09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.526456 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" Nov 25 09:04:39 crc kubenswrapper[4932]: I1125 09:04:39.754147 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr"] Nov 25 09:04:40 crc kubenswrapper[4932]: I1125 09:04:40.352794 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" event={"ID":"58b17ce6-9e76-4007-ac84-d59d6c3c38d2","Type":"ContainerStarted","Data":"9a55236d25fe94550db3c90356234792a5bc5e9d7233dc24e76611e955170a15"} Nov 25 09:04:45 crc kubenswrapper[4932]: I1125 09:04:45.393140 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" event={"ID":"58b17ce6-9e76-4007-ac84-d59d6c3c38d2","Type":"ContainerStarted","Data":"74f7d8e83295f2bc7a2087b674e4f8a1e22c6cb43d6f4d1404ccdc4beccfca28"} Nov 25 09:04:45 crc kubenswrapper[4932]: I1125 09:04:45.393689 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" Nov 25 09:04:45 crc kubenswrapper[4932]: I1125 09:04:45.427148 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" podStartSLOduration=1.7956777910000001 podStartE2EDuration="6.427129868s" podCreationTimestamp="2025-11-25 09:04:39 +0000 UTC" firstStartedPulling="2025-11-25 09:04:39.767676418 +0000 UTC m=+939.893705981" lastFinishedPulling="2025-11-25 09:04:44.399128495 +0000 UTC m=+944.525158058" observedRunningTime="2025-11-25 09:04:45.423922097 +0000 UTC m=+945.549951700" watchObservedRunningTime="2025-11-25 09:04:45.427129868 +0000 UTC m=+945.553159431" Nov 25 09:04:49 crc kubenswrapper[4932]: I1125 09:04:49.530606 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.132314 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.134070 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.136872 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-hgw57" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.145357 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.149922 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.151065 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.153696 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-4g7km" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.164269 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.176969 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.177938 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.184522 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-kmtcm" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.199786 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.214454 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.215395 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.218295 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-gcfvd" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.259317 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.260340 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.263306 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-kff9b" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.268154 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.269507 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.273108 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-9dnd9" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.286731 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.287473 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-897wn\" (UniqueName: \"kubernetes.io/projected/96d031ad-3550-4423-9422-93911c9a8217-kube-api-access-897wn\") pod \"barbican-operator-controller-manager-86dc4d89c8-ps52v\" (UID: \"96d031ad-3550-4423-9422-93911c9a8217\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.287565 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7pwm\" (UniqueName: \"kubernetes.io/projected/a92ad4a6-d922-45c1-b02d-f382b1ea1cc0-kube-api-access-p7pwm\") pod \"designate-operator-controller-manager-7d695c9b56-m2tpg\" (UID: \"a92ad4a6-d922-45c1-b02d-f382b1ea1cc0\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.287593 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22w6m\" (UniqueName: \"kubernetes.io/projected/bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd-kube-api-access-22w6m\") pod \"cinder-operator-controller-manager-79856dc55c-t6t6s\" (UID: \"bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.295829 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.300564 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.301802 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.303655 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.306952 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-jt78v" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.308362 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.373598 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.389309 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqnsm\" (UniqueName: \"kubernetes.io/projected/bde38973-f401-4917-8abc-08dafaf8f10c-kube-api-access-cqnsm\") pod \"glance-operator-controller-manager-68b95954c9-x4l6r\" (UID: \"bde38973-f401-4917-8abc-08dafaf8f10c\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.389385 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-897wn\" (UniqueName: \"kubernetes.io/projected/96d031ad-3550-4423-9422-93911c9a8217-kube-api-access-897wn\") pod \"barbican-operator-controller-manager-86dc4d89c8-ps52v\" (UID: \"96d031ad-3550-4423-9422-93911c9a8217\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.389933 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7pwm\" (UniqueName: \"kubernetes.io/projected/a92ad4a6-d922-45c1-b02d-f382b1ea1cc0-kube-api-access-p7pwm\") pod \"designate-operator-controller-manager-7d695c9b56-m2tpg\" (UID: \"a92ad4a6-d922-45c1-b02d-f382b1ea1cc0\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.389965 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfj6b\" (UniqueName: \"kubernetes.io/projected/d2216d92-9e2d-4549-b634-63ec3ada9f14-kube-api-access-bfj6b\") pod \"heat-operator-controller-manager-774b86978c-bk2nv\" (UID: \"d2216d92-9e2d-4549-b634-63ec3ada9f14\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.389994 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22w6m\" (UniqueName: \"kubernetes.io/projected/bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd-kube-api-access-22w6m\") pod \"cinder-operator-controller-manager-79856dc55c-t6t6s\" (UID: \"bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.390022 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/765f296f-cd42-4f2c-9b21-2bcbc65d490c-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-5bkct\" (UID: 
\"765f296f-cd42-4f2c-9b21-2bcbc65d490c\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.390382 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gx5fh\" (UniqueName: \"kubernetes.io/projected/65fb5603-367e-431f-a8d3-0a3281a70361-kube-api-access-gx5fh\") pod \"horizon-operator-controller-manager-68c9694994-jg9pn\" (UID: \"65fb5603-367e-431f-a8d3-0a3281a70361\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.390422 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s552v\" (UniqueName: \"kubernetes.io/projected/765f296f-cd42-4f2c-9b21-2bcbc65d490c-kube-api-access-s552v\") pod \"infra-operator-controller-manager-d5cc86f4b-5bkct\" (UID: \"765f296f-cd42-4f2c-9b21-2bcbc65d490c\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.406106 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.407320 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.409952 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-msxsh" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.429053 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22w6m\" (UniqueName: \"kubernetes.io/projected/bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd-kube-api-access-22w6m\") pod \"cinder-operator-controller-manager-79856dc55c-t6t6s\" (UID: \"bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd\") " pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.442566 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.446926 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-897wn\" (UniqueName: \"kubernetes.io/projected/96d031ad-3550-4423-9422-93911c9a8217-kube-api-access-897wn\") pod \"barbican-operator-controller-manager-86dc4d89c8-ps52v\" (UID: \"96d031ad-3550-4423-9422-93911c9a8217\") " pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.453975 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7pwm\" (UniqueName: \"kubernetes.io/projected/a92ad4a6-d922-45c1-b02d-f382b1ea1cc0-kube-api-access-p7pwm\") pod \"designate-operator-controller-manager-7d695c9b56-m2tpg\" (UID: \"a92ad4a6-d922-45c1-b02d-f382b1ea1cc0\") " pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.458763 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.476926 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.478155 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.480495 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.484000 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-zh4cp" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.493110 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfj6b\" (UniqueName: \"kubernetes.io/projected/d2216d92-9e2d-4549-b634-63ec3ada9f14-kube-api-access-bfj6b\") pod \"heat-operator-controller-manager-774b86978c-bk2nv\" (UID: \"d2216d92-9e2d-4549-b634-63ec3ada9f14\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.493218 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/765f296f-cd42-4f2c-9b21-2bcbc65d490c-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-5bkct\" (UID: \"765f296f-cd42-4f2c-9b21-2bcbc65d490c\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.493245 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gx5fh\" (UniqueName: \"kubernetes.io/projected/65fb5603-367e-431f-a8d3-0a3281a70361-kube-api-access-gx5fh\") pod \"horizon-operator-controller-manager-68c9694994-jg9pn\" (UID: \"65fb5603-367e-431f-a8d3-0a3281a70361\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.493271 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s552v\" (UniqueName: \"kubernetes.io/projected/765f296f-cd42-4f2c-9b21-2bcbc65d490c-kube-api-access-s552v\") pod \"infra-operator-controller-manager-d5cc86f4b-5bkct\" (UID: \"765f296f-cd42-4f2c-9b21-2bcbc65d490c\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.493317 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqnsm\" (UniqueName: \"kubernetes.io/projected/bde38973-f401-4917-8abc-08dafaf8f10c-kube-api-access-cqnsm\") pod \"glance-operator-controller-manager-68b95954c9-x4l6r\" (UID: \"bde38973-f401-4917-8abc-08dafaf8f10c\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.493417 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mkhf\" (UniqueName: \"kubernetes.io/projected/6dedf441-145d-4642-a0f0-fb691d2edd2d-kube-api-access-8mkhf\") pod \"ironic-operator-controller-manager-5bfcdc958c-b9l7b\" (UID: 
\"6dedf441-145d-4642-a0f0-fb691d2edd2d\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" Nov 25 09:05:06 crc kubenswrapper[4932]: E1125 09:05:06.493856 4932 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 09:05:06 crc kubenswrapper[4932]: E1125 09:05:06.493907 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/765f296f-cd42-4f2c-9b21-2bcbc65d490c-cert podName:765f296f-cd42-4f2c-9b21-2bcbc65d490c nodeName:}" failed. No retries permitted until 2025-11-25 09:05:06.99389033 +0000 UTC m=+967.119919893 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/765f296f-cd42-4f2c-9b21-2bcbc65d490c-cert") pod "infra-operator-controller-manager-d5cc86f4b-5bkct" (UID: "765f296f-cd42-4f2c-9b21-2bcbc65d490c") : secret "infra-operator-webhook-server-cert" not found Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.496299 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.498935 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.515418 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.516469 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.524755 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfj6b\" (UniqueName: \"kubernetes.io/projected/d2216d92-9e2d-4549-b634-63ec3ada9f14-kube-api-access-bfj6b\") pod \"heat-operator-controller-manager-774b86978c-bk2nv\" (UID: \"d2216d92-9e2d-4549-b634-63ec3ada9f14\") " pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.535425 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqnsm\" (UniqueName: \"kubernetes.io/projected/bde38973-f401-4917-8abc-08dafaf8f10c-kube-api-access-cqnsm\") pod \"glance-operator-controller-manager-68b95954c9-x4l6r\" (UID: \"bde38973-f401-4917-8abc-08dafaf8f10c\") " pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.535501 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-vdfwn" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.539454 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s552v\" (UniqueName: \"kubernetes.io/projected/765f296f-cd42-4f2c-9b21-2bcbc65d490c-kube-api-access-s552v\") pod \"infra-operator-controller-manager-d5cc86f4b-5bkct\" (UID: \"765f296f-cd42-4f2c-9b21-2bcbc65d490c\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.540902 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gx5fh\" (UniqueName: 
\"kubernetes.io/projected/65fb5603-367e-431f-a8d3-0a3281a70361-kube-api-access-gx5fh\") pod \"horizon-operator-controller-manager-68c9694994-jg9pn\" (UID: \"65fb5603-367e-431f-a8d3-0a3281a70361\") " pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.547784 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.556436 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.560001 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-tckm9" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.564757 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.582912 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.583945 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.586738 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-jfmk2" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.593568 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.594514 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mkhf\" (UniqueName: \"kubernetes.io/projected/6dedf441-145d-4642-a0f0-fb691d2edd2d-kube-api-access-8mkhf\") pod \"ironic-operator-controller-manager-5bfcdc958c-b9l7b\" (UID: \"6dedf441-145d-4642-a0f0-fb691d2edd2d\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.594599 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw29w\" (UniqueName: \"kubernetes.io/projected/12f70ae4-14e2-4eed-9c1d-29e380a6d757-kube-api-access-tw29w\") pod \"keystone-operator-controller-manager-748dc6576f-fjlpt\" (UID: \"12f70ae4-14e2-4eed-9c1d-29e380a6d757\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.594634 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nn5m\" (UniqueName: \"kubernetes.io/projected/6fcca084-72cb-48ba-948f-6c4d861f6096-kube-api-access-4nn5m\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-phkzd\" (UID: \"6fcca084-72cb-48ba-948f-6c4d861f6096\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.618508 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.630728 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mkhf\" (UniqueName: \"kubernetes.io/projected/6dedf441-145d-4642-a0f0-fb691d2edd2d-kube-api-access-8mkhf\") pod \"ironic-operator-controller-manager-5bfcdc958c-b9l7b\" (UID: \"6dedf441-145d-4642-a0f0-fb691d2edd2d\") " pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.648083 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.657784 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.660883 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.662974 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-hvdw4" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.669311 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.686903 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.694539 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.695460 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw29w\" (UniqueName: \"kubernetes.io/projected/12f70ae4-14e2-4eed-9c1d-29e380a6d757-kube-api-access-tw29w\") pod \"keystone-operator-controller-manager-748dc6576f-fjlpt\" (UID: \"12f70ae4-14e2-4eed-9c1d-29e380a6d757\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.695519 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nn5m\" (UniqueName: \"kubernetes.io/projected/6fcca084-72cb-48ba-948f-6c4d861f6096-kube-api-access-4nn5m\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-phkzd\" (UID: \"6fcca084-72cb-48ba-948f-6c4d861f6096\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.695551 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bc474\" (UniqueName: \"kubernetes.io/projected/e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d-kube-api-access-bc474\") pod \"manila-operator-controller-manager-58bb8d67cc-bbmvf\" (UID: \"e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.695668 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h5p2\" 
(UniqueName: \"kubernetes.io/projected/070a395c-8ac5-4303-80fb-7f93282a9f99-kube-api-access-8h5p2\") pod \"nova-operator-controller-manager-79556f57fc-rm8qr\" (UID: \"070a395c-8ac5-4303-80fb-7f93282a9f99\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.697864 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.699842 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-mx44d" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.701346 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.702332 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.706215 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.706436 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-rwzlp" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.707526 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.708843 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.711132 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-gr8tp" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.719536 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.748763 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.770069 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tw29w\" (UniqueName: \"kubernetes.io/projected/12f70ae4-14e2-4eed-9c1d-29e380a6d757-kube-api-access-tw29w\") pod \"keystone-operator-controller-manager-748dc6576f-fjlpt\" (UID: \"12f70ae4-14e2-4eed-9c1d-29e380a6d757\") " pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.770747 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nn5m\" (UniqueName: \"kubernetes.io/projected/6fcca084-72cb-48ba-948f-6c4d861f6096-kube-api-access-4nn5m\") pod \"mariadb-operator-controller-manager-cb6c4fdb7-phkzd\" (UID: \"6fcca084-72cb-48ba-948f-6c4d861f6096\") " pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.773242 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.774346 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.778679 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-nlzgm" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.786081 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.797626 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88f6z\" (UniqueName: \"kubernetes.io/projected/af96b4c7-e9eb-4609-afab-ba3cc15f0a48-kube-api-access-88f6z\") pod \"ovn-operator-controller-manager-66cf5c67ff-rcv5q\" (UID: \"af96b4c7-e9eb-4609-afab-ba3cc15f0a48\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.797682 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8wxv\" (UniqueName: \"kubernetes.io/projected/dae34761-581e-4f65-8d7c-d6c2d302b4f7-kube-api-access-w8wxv\") pod \"octavia-operator-controller-manager-fd75fd47d-q7rt6\" (UID: \"dae34761-581e-4f65-8d7c-d6c2d302b4f7\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.797728 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bc474\" (UniqueName: \"kubernetes.io/projected/e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d-kube-api-access-bc474\") pod \"manila-operator-controller-manager-58bb8d67cc-bbmvf\" (UID: \"e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.797806 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6c508686-35cb-4c09-8ee6-2c655072d7d3-cert\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-lmss6\" (UID: \"6c508686-35cb-4c09-8ee6-2c655072d7d3\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.797834 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2qvp\" (UniqueName: \"kubernetes.io/projected/243ff257-9836-4e43-9228-e05f18282650-kube-api-access-r2qvp\") pod \"neutron-operator-controller-manager-7c57c8bbc4-cwqvg\" (UID: \"243ff257-9836-4e43-9228-e05f18282650\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.797861 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h5p2\" (UniqueName: \"kubernetes.io/projected/070a395c-8ac5-4303-80fb-7f93282a9f99-kube-api-access-8h5p2\") pod \"nova-operator-controller-manager-79556f57fc-rm8qr\" (UID: \"070a395c-8ac5-4303-80fb-7f93282a9f99\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.797902 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8hj6\" (UniqueName: 
\"kubernetes.io/projected/6c508686-35cb-4c09-8ee6-2c655072d7d3-kube-api-access-q8hj6\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-lmss6\" (UID: \"6c508686-35cb-4c09-8ee6-2c655072d7d3\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.825239 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.831094 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bc474\" (UniqueName: \"kubernetes.io/projected/e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d-kube-api-access-bc474\") pod \"manila-operator-controller-manager-58bb8d67cc-bbmvf\" (UID: \"e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d\") " pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.831491 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.837832 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h5p2\" (UniqueName: \"kubernetes.io/projected/070a395c-8ac5-4303-80fb-7f93282a9f99-kube-api-access-8h5p2\") pod \"nova-operator-controller-manager-79556f57fc-rm8qr\" (UID: \"070a395c-8ac5-4303-80fb-7f93282a9f99\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.844466 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.845818 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.848114 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-x64ll" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.863584 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.864224 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.876643 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.877991 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.884599 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-pbm5f" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.894031 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.894663 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28"] Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.894762 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.898882 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88f6z\" (UniqueName: \"kubernetes.io/projected/af96b4c7-e9eb-4609-afab-ba3cc15f0a48-kube-api-access-88f6z\") pod \"ovn-operator-controller-manager-66cf5c67ff-rcv5q\" (UID: \"af96b4c7-e9eb-4609-afab-ba3cc15f0a48\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.898914 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8wxv\" (UniqueName: \"kubernetes.io/projected/dae34761-581e-4f65-8d7c-d6c2d302b4f7-kube-api-access-w8wxv\") pod \"octavia-operator-controller-manager-fd75fd47d-q7rt6\" (UID: \"dae34761-581e-4f65-8d7c-d6c2d302b4f7\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.898978 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6c508686-35cb-4c09-8ee6-2c655072d7d3-cert\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-lmss6\" (UID: \"6c508686-35cb-4c09-8ee6-2c655072d7d3\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.898998 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djfj5\" (UniqueName: \"kubernetes.io/projected/8c014265-53e2-4c4d-9c25-452686712f2e-kube-api-access-djfj5\") pod \"placement-operator-controller-manager-5db546f9d9-4wph8\" (UID: \"8c014265-53e2-4c4d-9c25-452686712f2e\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.899019 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2qvp\" (UniqueName: \"kubernetes.io/projected/243ff257-9836-4e43-9228-e05f18282650-kube-api-access-r2qvp\") pod \"neutron-operator-controller-manager-7c57c8bbc4-cwqvg\" (UID: \"243ff257-9836-4e43-9228-e05f18282650\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.899054 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8hj6\" (UniqueName: \"kubernetes.io/projected/6c508686-35cb-4c09-8ee6-2c655072d7d3-kube-api-access-q8hj6\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-lmss6\" (UID: \"6c508686-35cb-4c09-8ee6-2c655072d7d3\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" Nov 25 09:05:06 crc kubenswrapper[4932]: E1125 09:05:06.899589 4932 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 
09:05:06 crc kubenswrapper[4932]: E1125 09:05:06.899627 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6c508686-35cb-4c09-8ee6-2c655072d7d3-cert podName:6c508686-35cb-4c09-8ee6-2c655072d7d3 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:07.39961431 +0000 UTC m=+967.525643873 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6c508686-35cb-4c09-8ee6-2c655072d7d3-cert") pod "openstack-baremetal-operator-controller-manager-b58f89467-lmss6" (UID: "6c508686-35cb-4c09-8ee6-2c655072d7d3") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.926638 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd"
Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.943678 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88f6z\" (UniqueName: \"kubernetes.io/projected/af96b4c7-e9eb-4609-afab-ba3cc15f0a48-kube-api-access-88f6z\") pod \"ovn-operator-controller-manager-66cf5c67ff-rcv5q\" (UID: \"af96b4c7-e9eb-4609-afab-ba3cc15f0a48\") " pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q"
Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.946263 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8wxv\" (UniqueName: \"kubernetes.io/projected/dae34761-581e-4f65-8d7c-d6c2d302b4f7-kube-api-access-w8wxv\") pod \"octavia-operator-controller-manager-fd75fd47d-q7rt6\" (UID: \"dae34761-581e-4f65-8d7c-d6c2d302b4f7\") " pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6"
Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.951439 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6"
Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.958261 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2qvp\" (UniqueName: \"kubernetes.io/projected/243ff257-9836-4e43-9228-e05f18282650-kube-api-access-r2qvp\") pod \"neutron-operator-controller-manager-7c57c8bbc4-cwqvg\" (UID: \"243ff257-9836-4e43-9228-e05f18282650\") " pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg"
Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.961650 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf"
Nov 25 09:05:06 crc kubenswrapper[4932]: I1125 09:05:06.966079 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8hj6\" (UniqueName: \"kubernetes.io/projected/6c508686-35cb-4c09-8ee6-2c655072d7d3-kube-api-access-q8hj6\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-lmss6\" (UID: \"6c508686-35cb-4c09-8ee6-2c655072d7d3\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.022045 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.035519 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.043917 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/765f296f-cd42-4f2c-9b21-2bcbc65d490c-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-5bkct\" (UID: \"765f296f-cd42-4f2c-9b21-2bcbc65d490c\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.043989 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djfj5\" (UniqueName: \"kubernetes.io/projected/8c014265-53e2-4c4d-9c25-452686712f2e-kube-api-access-djfj5\") pod \"placement-operator-controller-manager-5db546f9d9-4wph8\" (UID: \"8c014265-53e2-4c4d-9c25-452686712f2e\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.044045 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nx5z\" (UniqueName: \"kubernetes.io/projected/695ce8a3-6a30-42a4-8ba2-f6309470362c-kube-api-access-5nx5z\") pod \"telemetry-operator-controller-manager-567f98c9d-blm28\" (UID: \"695ce8a3-6a30-42a4-8ba2-f6309470362c\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.053614 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fd45n\" (UniqueName: \"kubernetes.io/projected/d4860edf-9f45-4dd2-8e35-7c3a4444370a-kube-api-access-fd45n\") pod \"swift-operator-controller-manager-6fdc4fcf86-tkjb4\" (UID: \"d4860edf-9f45-4dd2-8e35-7c3a4444370a\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.057895 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/765f296f-cd42-4f2c-9b21-2bcbc65d490c-cert\") pod \"infra-operator-controller-manager-d5cc86f4b-5bkct\" (UID: \"765f296f-cd42-4f2c-9b21-2bcbc65d490c\") " pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.108258 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.108370 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.115224 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-5w7bk"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.115564 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djfj5\" (UniqueName: \"kubernetes.io/projected/8c014265-53e2-4c4d-9c25-452686712f2e-kube-api-access-djfj5\") pod \"placement-operator-controller-manager-5db546f9d9-4wph8\" (UID: \"8c014265-53e2-4c4d-9c25-452686712f2e\") " pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.118325 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.151767 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-pkjjd"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.156640 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.160289 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nx5z\" (UniqueName: \"kubernetes.io/projected/695ce8a3-6a30-42a4-8ba2-f6309470362c-kube-api-access-5nx5z\") pod \"telemetry-operator-controller-manager-567f98c9d-blm28\" (UID: \"695ce8a3-6a30-42a4-8ba2-f6309470362c\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.160387 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fd45n\" (UniqueName: \"kubernetes.io/projected/d4860edf-9f45-4dd2-8e35-7c3a4444370a-kube-api-access-fd45n\") pod \"swift-operator-controller-manager-6fdc4fcf86-tkjb4\" (UID: \"d4860edf-9f45-4dd2-8e35-7c3a4444370a\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.176171 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-pkjjd"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.177560 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-xq42j"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.193035 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.193098 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.193153 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.193831 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"91eb2e40d6f72fe209b50e6c986a543f1a5accc33bb0098951f158439d3b5195"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.193890 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://91eb2e40d6f72fe209b50e6c986a543f1a5accc33bb0098951f158439d3b5195" gracePeriod=600
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.217808 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nx5z\" (UniqueName: \"kubernetes.io/projected/695ce8a3-6a30-42a4-8ba2-f6309470362c-kube-api-access-5nx5z\") pod \"telemetry-operator-controller-manager-567f98c9d-blm28\" (UID: \"695ce8a3-6a30-42a4-8ba2-f6309470362c\") " pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.224946 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fd45n\" (UniqueName: \"kubernetes.io/projected/d4860edf-9f45-4dd2-8e35-7c3a4444370a-kube-api-access-fd45n\") pod \"swift-operator-controller-manager-6fdc4fcf86-tkjb4\" (UID: \"d4860edf-9f45-4dd2-8e35-7c3a4444370a\") " pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.231914 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.232210 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.233143 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.234756 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-dvb8s"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.235321 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.235520 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.244681 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.248314 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.255223 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.256169 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.257865 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-x9vzz"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.257917 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.261489 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drtm6\" (UniqueName: \"kubernetes.io/projected/3aadd9b8-da59-45e3-979b-ac4896561d6c-kube-api-access-drtm6\") pod \"test-operator-controller-manager-5cb74df96-cjzfx\" (UID: \"3aadd9b8-da59-45e3-979b-ac4896561d6c\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.261631 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zj8k\" (UniqueName: \"kubernetes.io/projected/45ebb480-733b-47a3-a682-8fe0be16eb78-kube-api-access-9zj8k\") pod \"watcher-operator-controller-manager-864885998-pkjjd\" (UID: \"45ebb480-733b-47a3-a682-8fe0be16eb78\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.264929 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.280198 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.363379 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9zj8k\" (UniqueName: \"kubernetes.io/projected/45ebb480-733b-47a3-a682-8fe0be16eb78-kube-api-access-9zj8k\") pod \"watcher-operator-controller-manager-864885998-pkjjd\" (UID: \"45ebb480-733b-47a3-a682-8fe0be16eb78\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.363440 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-metrics-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.363500 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz6k8\" (UniqueName: \"kubernetes.io/projected/66b0ef7a-14c8-4702-8e52-67809a677880-kube-api-access-qz6k8\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.363544 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-webhook-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.363571 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drtm6\" (UniqueName: \"kubernetes.io/projected/3aadd9b8-da59-45e3-979b-ac4896561d6c-kube-api-access-drtm6\") pod \"test-operator-controller-manager-5cb74df96-cjzfx\" (UID: \"3aadd9b8-da59-45e3-979b-ac4896561d6c\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.363618 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgccj\" (UniqueName: \"kubernetes.io/projected/1b5af146-d2d1-4526-8a10-84ebc35baca8-kube-api-access-jgccj\") pod \"rabbitmq-cluster-operator-manager-668c99d594-bwv87\" (UID: \"1b5af146-d2d1-4526-8a10-84ebc35baca8\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.388801 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drtm6\" (UniqueName: \"kubernetes.io/projected/3aadd9b8-da59-45e3-979b-ac4896561d6c-kube-api-access-drtm6\") pod \"test-operator-controller-manager-5cb74df96-cjzfx\" (UID: \"3aadd9b8-da59-45e3-979b-ac4896561d6c\") " pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.388925 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zj8k\" (UniqueName: \"kubernetes.io/projected/45ebb480-733b-47a3-a682-8fe0be16eb78-kube-api-access-9zj8k\") pod \"watcher-operator-controller-manager-864885998-pkjjd\" (UID: \"45ebb480-733b-47a3-a682-8fe0be16eb78\") " pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.463594 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.473488 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6c508686-35cb-4c09-8ee6-2c655072d7d3-cert\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-lmss6\" (UID: \"6c508686-35cb-4c09-8ee6-2c655072d7d3\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.473540 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-metrics-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.473579 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz6k8\" (UniqueName: \"kubernetes.io/projected/66b0ef7a-14c8-4702-8e52-67809a677880-kube-api-access-qz6k8\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.473611 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-webhook-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.473658 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgccj\" (UniqueName: \"kubernetes.io/projected/1b5af146-d2d1-4526-8a10-84ebc35baca8-kube-api-access-jgccj\") pod \"rabbitmq-cluster-operator-manager-668c99d594-bwv87\" (UID: \"1b5af146-d2d1-4526-8a10-84ebc35baca8\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87"
Nov 25 09:05:07 crc kubenswrapper[4932]: E1125 09:05:07.473688 4932 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 25 09:05:07 crc kubenswrapper[4932]: E1125 09:05:07.473774 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6c508686-35cb-4c09-8ee6-2c655072d7d3-cert podName:6c508686-35cb-4c09-8ee6-2c655072d7d3 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:08.473751912 +0000 UTC m=+968.599781525 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6c508686-35cb-4c09-8ee6-2c655072d7d3-cert") pod "openstack-baremetal-operator-controller-manager-b58f89467-lmss6" (UID: "6c508686-35cb-4c09-8ee6-2c655072d7d3") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 25 09:05:07 crc kubenswrapper[4932]: E1125 09:05:07.474155 4932 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 25 09:05:07 crc kubenswrapper[4932]: E1125 09:05:07.474225 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-webhook-certs podName:66b0ef7a-14c8-4702-8e52-67809a677880 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:07.974204075 +0000 UTC m=+968.100233648 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-webhook-certs") pod "openstack-operator-controller-manager-7cd5954d9-bdswv" (UID: "66b0ef7a-14c8-4702-8e52-67809a677880") : secret "webhook-server-cert" not found
Nov 25 09:05:07 crc kubenswrapper[4932]: E1125 09:05:07.474309 4932 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Nov 25 09:05:07 crc kubenswrapper[4932]: E1125 09:05:07.474335 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-metrics-certs podName:66b0ef7a-14c8-4702-8e52-67809a677880 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:07.974327298 +0000 UTC m=+968.100356961 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-metrics-certs") pod "openstack-operator-controller-manager-7cd5954d9-bdswv" (UID: "66b0ef7a-14c8-4702-8e52-67809a677880") : secret "metrics-server-cert" not found
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.490638 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.502939 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz6k8\" (UniqueName: \"kubernetes.io/projected/66b0ef7a-14c8-4702-8e52-67809a677880-kube-api-access-qz6k8\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.506329 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jgccj\" (UniqueName: \"kubernetes.io/projected/1b5af146-d2d1-4526-8a10-84ebc35baca8-kube-api-access-jgccj\") pod \"rabbitmq-cluster-operator-manager-668c99d594-bwv87\" (UID: \"1b5af146-d2d1-4526-8a10-84ebc35baca8\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.570973 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.577232 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.578006 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="91eb2e40d6f72fe209b50e6c986a543f1a5accc33bb0098951f158439d3b5195" exitCode=0
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.578061 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"91eb2e40d6f72fe209b50e6c986a543f1a5accc33bb0098951f158439d3b5195"}
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.578096 4932 scope.go:117] "RemoveContainer" containerID="de649297e499e1a80fc45537977d7092776afd0add46df5f77009f80ee0893ea"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.579501 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" event={"ID":"a92ad4a6-d922-45c1-b02d-f382b1ea1cc0","Type":"ContainerStarted","Data":"5cf3043fdf6612fbb9e28ec66437cacef5732f17a4d5d089f5c6808e2460e021"}
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.587047 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" event={"ID":"96d031ad-3550-4423-9422-93911c9a8217","Type":"ContainerStarted","Data":"f0613ec7590a1f34b508876b583eaac37a3b392cf46e5b007618446e8d8c2b9a"}
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.647783 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.690217 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.759469 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.799290 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.839821 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.916462 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.922340 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr"]
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.932003 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt"]
Nov 25 09:05:07 crc kubenswrapper[4932]: W1125 09:05:07.938996 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12f70ae4_14e2_4eed_9c1d_29e380a6d757.slice/crio-6d9330d85d27f001764fce54686622383f824e7afbb665f03e8b493ba163741f WatchSource:0}: Error finding container 6d9330d85d27f001764fce54686622383f824e7afbb665f03e8b493ba163741f: Status 404 returned error can't find the container with id 6d9330d85d27f001764fce54686622383f824e7afbb665f03e8b493ba163741f
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.981785 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-metrics-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.982165 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-webhook-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"
Nov 25 09:05:07 crc kubenswrapper[4932]: E1125 09:05:07.982334 4932 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Nov 25 09:05:07 crc kubenswrapper[4932]: E1125 09:05:07.982428 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-webhook-certs podName:66b0ef7a-14c8-4702-8e52-67809a677880 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:08.982407242 +0000 UTC m=+969.108436805 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-webhook-certs") pod "openstack-operator-controller-manager-7cd5954d9-bdswv" (UID: "66b0ef7a-14c8-4702-8e52-67809a677880") : secret "webhook-server-cert" not found
Nov 25 09:05:07 crc kubenswrapper[4932]: I1125 09:05:07.993171 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-metrics-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.040227 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q"]
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.050787 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd"]
Nov 25 09:05:08 crc kubenswrapper[4932]: W1125 09:05:08.056700 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaf96b4c7_e9eb_4609_afab_ba3cc15f0a48.slice/crio-d7e645eaf1b3d3599a3864e763a47f8e4f54b271702f2eb1dc010035cc692eb0 WatchSource:0}: Error finding container d7e645eaf1b3d3599a3864e763a47f8e4f54b271702f2eb1dc010035cc692eb0: Status 404 returned error can't find the container with id d7e645eaf1b3d3599a3864e763a47f8e4f54b271702f2eb1dc010035cc692eb0
Nov 25 09:05:08 crc kubenswrapper[4932]: W1125 09:05:08.063830 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6fcca084_72cb_48ba_948f_6c4d861f6096.slice/crio-353357fcd5264b6b56e7ab04694db97bf3dc25015b73f707b1cfeb5e7cb91ca6 WatchSource:0}: Error finding container 353357fcd5264b6b56e7ab04694db97bf3dc25015b73f707b1cfeb5e7cb91ca6: Status 404 returned error can't find the container with id 353357fcd5264b6b56e7ab04694db97bf3dc25015b73f707b1cfeb5e7cb91ca6
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.068735 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6"]
Nov 25 09:05:08 crc kubenswrapper[4932]: W1125 09:05:08.074932 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddae34761_581e_4f65_8d7c_d6c2d302b4f7.slice/crio-99025834f3f00583d5d135718d469ee0d5e83f7257914f76578fc8ddc41b0acb WatchSource:0}: Error finding container 99025834f3f00583d5d135718d469ee0d5e83f7257914f76578fc8ddc41b0acb: Status 404 returned error can't find the container with id 99025834f3f00583d5d135718d469ee0d5e83f7257914f76578fc8ddc41b0acb
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.247072 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf"]
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.258714 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct"]
Nov 25 09:05:08 crc kubenswrapper[4932]: W1125 09:05:08.267967 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5a4b4dd_4498_4ab5_9ca1_1ac3ab836e5d.slice/crio-ad8bf020fa869f966840e65c22a29e2db918b3f5b35c3abaa54052c3c816f64c WatchSource:0}: Error finding container ad8bf020fa869f966840e65c22a29e2db918b3f5b35c3abaa54052c3c816f64c: Status 404 returned error can't find the container with id ad8bf020fa869f966840e65c22a29e2db918b3f5b35c3abaa54052c3c816f64c
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.268054 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8"]
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.284907 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg"]
Nov 25 09:05:08 crc kubenswrapper[4932]: W1125 09:05:08.285027 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4860edf_9f45_4dd2_8e35_7c3a4444370a.slice/crio-47a8afe8769ace4772fd4115f20a2de28580267f9ec64c83a29e911c4a9723e9 WatchSource:0}: Error finding container 47a8afe8769ace4772fd4115f20a2de28580267f9ec64c83a29e911c4a9723e9: Status 404 returned error can't find the container with id 47a8afe8769ace4772fd4115f20a2de28580267f9ec64c83a29e911c4a9723e9
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.292142 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fd45n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-tkjb4_openstack-operators(d4860edf-9f45-4dd2-8e35-7c3a4444370a): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.292199 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s552v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-d5cc86f4b-5bkct_openstack-operators(765f296f-cd42-4f2c-9b21-2bcbc65d490c): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.298454 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s552v,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-d5cc86f4b-5bkct_openstack-operators(765f296f-cd42-4f2c-9b21-2bcbc65d490c): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.299800 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" podUID="765f296f-cd42-4f2c-9b21-2bcbc65d490c"
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.301093 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4"]
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.306086 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fd45n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-6fdc4fcf86-tkjb4_openstack-operators(d4860edf-9f45-4dd2-8e35-7c3a4444370a): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.308664 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" podUID="d4860edf-9f45-4dd2-8e35-7c3a4444370a"
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.309047 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-drtm6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cb74df96-cjzfx_openstack-operators(3aadd9b8-da59-45e3-979b-ac4896561d6c): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.312055 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-drtm6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cb74df96-cjzfx_openstack-operators(3aadd9b8-da59-45e3-979b-ac4896561d6c): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.312518 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx"]
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.313267 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" podUID="3aadd9b8-da59-45e3-979b-ac4896561d6c"
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.381579 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-864885998-pkjjd"]
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.389321 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87"]
Nov 25 09:05:08 crc kubenswrapper[4932]: W1125 09:05:08.395557 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod45ebb480_733b_47a3_a682_8fe0be16eb78.slice/crio-066f64efa8ca1053714a42e277d1b6eb82841e732ca8774c671b68c44ad5d386 WatchSource:0}: Error finding container 066f64efa8ca1053714a42e277d1b6eb82841e732ca8774c671b68c44ad5d386: Status 404 returned error can't find the container with id 066f64efa8ca1053714a42e277d1b6eb82841e732ca8774c671b68c44ad5d386
Nov 25 09:05:08 crc kubenswrapper[4932]: W1125 09:05:08.397777 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b5af146_d2d1_4526_8a10_84ebc35baca8.slice/crio-0bdd52b421d82ea28a7e02f9b810aa12038c1278c80525ff72a0965fccc5a600 WatchSource:0}: Error finding container 0bdd52b421d82ea28a7e02f9b810aa12038c1278c80525ff72a0965fccc5a600: Status 404 returned error can't find the container with id 0bdd52b421d82ea28a7e02f9b810aa12038c1278c80525ff72a0965fccc5a600
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.398577 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9zj8k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-pkjjd_openstack-operators(45ebb480-733b-47a3-a682-8fe0be16eb78): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.399941 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28"]
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.402105 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9zj8k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-pkjjd_openstack-operators(45ebb480-733b-47a3-a682-8fe0be16eb78): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.402253 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jgccj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-bwv87_openstack-operators(1b5af146-d2d1-4526-8a10-84ebc35baca8): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.403801 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" podUID="1b5af146-d2d1-4526-8a10-84ebc35baca8"
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.403861 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" podUID="45ebb480-733b-47a3-a682-8fe0be16eb78"
Nov 25 09:05:08 crc kubenswrapper[4932]: W1125 09:05:08.406966 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod695ce8a3_6a30_42a4_8ba2_f6309470362c.slice/crio-004e86b8f720c5c3daf49044fceede5fc58982a7cb52c1a0f882f89e832b79a5 WatchSource:0}: Error finding container 004e86b8f720c5c3daf49044fceede5fc58982a7cb52c1a0f882f89e832b79a5: Status 404 returned error can't find the container with id 004e86b8f720c5c3daf49044fceede5fc58982a7cb52c1a0f882f89e832b79a5
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.409653 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5nx5z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-567f98c9d-blm28_openstack-operators(695ce8a3-6a30-42a4-8ba2-f6309470362c): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.411641 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-5nx5z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-567f98c9d-blm28_openstack-operators(695ce8a3-6a30-42a4-8ba2-f6309470362c): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.413687 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" podUID="695ce8a3-6a30-42a4-8ba2-f6309470362c"
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.491746 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6c508686-35cb-4c09-8ee6-2c655072d7d3-cert\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-lmss6\" (UID: \"6c508686-35cb-4c09-8ee6-2c655072d7d3\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6"
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.491918 4932 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.492290 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6c508686-35cb-4c09-8ee6-2c655072d7d3-cert podName:6c508686-35cb-4c09-8ee6-2c655072d7d3 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:10.492272766 +0000 UTC m=+970.618302329 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6c508686-35cb-4c09-8ee6-2c655072d7d3-cert") pod "openstack-baremetal-operator-controller-manager-b58f89467-lmss6" (UID: "6c508686-35cb-4c09-8ee6-2c655072d7d3") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.597830 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"96dca2522fb61785e671095f31f8032a7eb4d218c261ece3b2cefa1b8cd2013b"}
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.599826 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" event={"ID":"12f70ae4-14e2-4eed-9c1d-29e380a6d757","Type":"ContainerStarted","Data":"6d9330d85d27f001764fce54686622383f824e7afbb665f03e8b493ba163741f"}
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.601543 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" event={"ID":"d4860edf-9f45-4dd2-8e35-7c3a4444370a","Type":"ContainerStarted","Data":"47a8afe8769ace4772fd4115f20a2de28580267f9ec64c83a29e911c4a9723e9"}
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.603649 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" event={"ID":"6dedf441-145d-4642-a0f0-fb691d2edd2d","Type":"ContainerStarted","Data":"a604b264eb50dfb0c39e6918aaa920c34ea85c40f23e8f60d0854dfed51f961d"}
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.603801 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" podUID="d4860edf-9f45-4dd2-8e35-7c3a4444370a"
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.605049 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" event={"ID":"1b5af146-d2d1-4526-8a10-84ebc35baca8","Type":"ContainerStarted","Data":"0bdd52b421d82ea28a7e02f9b810aa12038c1278c80525ff72a0965fccc5a600"}
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.611067 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" podUID="1b5af146-d2d1-4526-8a10-84ebc35baca8"
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.620925 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" event={"ID":"243ff257-9836-4e43-9228-e05f18282650","Type":"ContainerStarted","Data":"913eb7cc257b4ddf5e7e77fc107de75f805f102eee6b05e4571533f6d72c9f22"}
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.620968 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" event={"ID":"070a395c-8ac5-4303-80fb-7f93282a9f99","Type":"ContainerStarted","Data":"6e4da3a2cc56038c0a03d298a1772474fc83c19a846a29ec33a5e608cfa34ff3"}
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.620982 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" event={"ID":"695ce8a3-6a30-42a4-8ba2-f6309470362c","Type":"ContainerStarted","Data":"004e86b8f720c5c3daf49044fceede5fc58982a7cb52c1a0f882f89e832b79a5"}
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.621691 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" event={"ID":"45ebb480-733b-47a3-a682-8fe0be16eb78","Type":"ContainerStarted","Data":"066f64efa8ca1053714a42e277d1b6eb82841e732ca8774c671b68c44ad5d386"}
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.621724 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" podUID="695ce8a3-6a30-42a4-8ba2-f6309470362c"
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.623630 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" event={"ID":"8c014265-53e2-4c4d-9c25-452686712f2e","Type":"ContainerStarted","Data":"e60937141f74368cb55ef7c6c60a0e29ae80d4aec68d8f68b3f7e4b37c528be3"}
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.624811 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" event={"ID":"bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd","Type":"ContainerStarted","Data":"d648f0b7f3c7304e939ab1af55d5f3c4ac38a3bf74903e331bc08d905d6a3167"}
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.626134 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" event={"ID":"6fcca084-72cb-48ba-948f-6c4d861f6096","Type":"ContainerStarted","Data":"353357fcd5264b6b56e7ab04694db97bf3dc25015b73f707b1cfeb5e7cb91ca6"}
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.627328 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" event={"ID":"af96b4c7-e9eb-4609-afab-ba3cc15f0a48","Type":"ContainerStarted","Data":"d7e645eaf1b3d3599a3864e763a47f8e4f54b271702f2eb1dc010035cc692eb0"}
Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.629140 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" event={"ID":"bde38973-f401-4917-8abc-08dafaf8f10c","Type":"ContainerStarted","Data":"9ae6e4a6a4d3dcee6706e0f96a2179e7355081052adf4ff40c4883d3ed25e0fd"}
Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.628546 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to
\"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" podUID="45ebb480-733b-47a3-a682-8fe0be16eb78" Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.630355 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" event={"ID":"d2216d92-9e2d-4549-b634-63ec3ada9f14","Type":"ContainerStarted","Data":"3180a2595401b21f40c0fb4f8a9bdfa3c3c028e93288bf357188cf9a19ee7291"} Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.633567 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" event={"ID":"3aadd9b8-da59-45e3-979b-ac4896561d6c","Type":"ContainerStarted","Data":"d256101b84d5830f5e3ae8762acceab8face68034512491cafd1b1cdda231bee"} Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.636203 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" podUID="3aadd9b8-da59-45e3-979b-ac4896561d6c" Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.638870 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" event={"ID":"e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d","Type":"ContainerStarted","Data":"ad8bf020fa869f966840e65c22a29e2db918b3f5b35c3abaa54052c3c816f64c"} Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.645588 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" event={"ID":"dae34761-581e-4f65-8d7c-d6c2d302b4f7","Type":"ContainerStarted","Data":"99025834f3f00583d5d135718d469ee0d5e83f7257914f76578fc8ddc41b0acb"} Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.647528 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" event={"ID":"65fb5603-367e-431f-a8d3-0a3281a70361","Type":"ContainerStarted","Data":"3f55f04356b38f682f7334d19278085d5d88da6aaf3f9f262c2b2602d07547b2"} Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.650324 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" event={"ID":"765f296f-cd42-4f2c-9b21-2bcbc65d490c","Type":"ContainerStarted","Data":"1ae4bc2219aa4e04cc497d6ceb90494ecb4308cbf7fcbf2e3d6423dafd34b41d"} Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.658244 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" podUID="765f296f-cd42-4f2c-9b21-2bcbc65d490c" Nov 25 09:05:08 crc kubenswrapper[4932]: I1125 09:05:08.998588 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-webhook-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.998783 4932 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 09:05:08 crc kubenswrapper[4932]: E1125 09:05:08.998889 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-webhook-certs podName:66b0ef7a-14c8-4702-8e52-67809a677880 nodeName:}" failed. No retries permitted until 2025-11-25 09:05:10.998867758 +0000 UTC m=+971.124897321 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-webhook-certs") pod "openstack-operator-controller-manager-7cd5954d9-bdswv" (UID: "66b0ef7a-14c8-4702-8e52-67809a677880") : secret "webhook-server-cert" not found Nov 25 09:05:09 crc kubenswrapper[4932]: E1125 09:05:09.659469 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" podUID="1b5af146-d2d1-4526-8a10-84ebc35baca8" Nov 25 09:05:09 crc kubenswrapper[4932]: E1125 09:05:09.659942 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" podUID="d4860edf-9f45-4dd2-8e35-7c3a4444370a" Nov 25 09:05:09 crc kubenswrapper[4932]: E1125 09:05:09.660169 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:86df58f744c1d23233cc98f6ea17c8d6da637c50003d0fc8c100045594aa9894\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" podUID="765f296f-cd42-4f2c-9b21-2bcbc65d490c" Nov 25 09:05:09 crc kubenswrapper[4932]: E1125 09:05:09.660666 4932 
pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:5324a6d2f76fc3041023b0cbd09a733ef2b59f310d390e4d6483d219eb96494f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" podUID="695ce8a3-6a30-42a4-8ba2-f6309470362c" Nov 25 09:05:09 crc kubenswrapper[4932]: E1125 09:05:09.660684 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" podUID="3aadd9b8-da59-45e3-979b-ac4896561d6c" Nov 25 09:05:09 crc kubenswrapper[4932]: E1125 09:05:09.660764 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" podUID="45ebb480-733b-47a3-a682-8fe0be16eb78" Nov 25 09:05:10 crc kubenswrapper[4932]: I1125 09:05:10.521776 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6c508686-35cb-4c09-8ee6-2c655072d7d3-cert\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-lmss6\" (UID: \"6c508686-35cb-4c09-8ee6-2c655072d7d3\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" Nov 25 09:05:10 crc kubenswrapper[4932]: I1125 09:05:10.527786 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6c508686-35cb-4c09-8ee6-2c655072d7d3-cert\") pod \"openstack-baremetal-operator-controller-manager-b58f89467-lmss6\" (UID: \"6c508686-35cb-4c09-8ee6-2c655072d7d3\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" Nov 25 09:05:10 crc kubenswrapper[4932]: I1125 09:05:10.665308 4932 util.go:30] "No sandbox for pod can be found. 
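
The MountVolume.SetUp failures at 09:05:08 are both "secret not found": the webhook serving certificates ("openstack-baremetal-operator-webhook-server-cert" and "webhook-server-cert") are created asynchronously, so the kubelet backs the mount operation off with "(durationBeforeRetry 2s)" and, once the Secrets exist, both mounts succeed at 09:05:10 with no intervention needed. The delay follows kubelet's per-operation exponential backoff; a sketch of that schedule, assuming the usual 500ms initial delay doubling per consecutive failure up to a cap of roughly two minutes (the constants are assumptions; only the 2s step is actually visible in this log):

    # Retry schedule for a failing volume operation, kubelet-style exponential
    # backoff. INITIAL/FACTOR/CAP are assumed constants; only the 2s value is
    # confirmed by the "(durationBeforeRetry 2s)" entries above.
    INITIAL, FACTOR, CAP = 0.5, 2.0, 2 * 60 + 2  # seconds

    def duration_before_retry(consecutive_failures: int) -> float:
        return min(INITIAL * FACTOR ** (consecutive_failures - 1), CAP)

    for n in range(1, 9):
        print(f"failure {n}: retry in {duration_before_retry(n):.1f}s")
    # failure 3 -> 2.0s; the retry at 09:05:10.52 succeeds because the
    # webhook-server-cert Secret exists by then.
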
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" Nov 25 09:05:11 crc kubenswrapper[4932]: I1125 09:05:11.030353 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-webhook-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" Nov 25 09:05:11 crc kubenswrapper[4932]: I1125 09:05:11.049015 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/66b0ef7a-14c8-4702-8e52-67809a677880-webhook-certs\") pod \"openstack-operator-controller-manager-7cd5954d9-bdswv\" (UID: \"66b0ef7a-14c8-4702-8e52-67809a677880\") " pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" Nov 25 09:05:11 crc kubenswrapper[4932]: I1125 09:05:11.255480 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" Nov 25 09:05:16 crc kubenswrapper[4932]: I1125 09:05:16.731630 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6"] Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.012453 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv"] Nov 25 09:05:17 crc kubenswrapper[4932]: W1125 09:05:17.035784 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66b0ef7a_14c8_4702_8e52_67809a677880.slice/crio-494858f3e9b3a44720fe80a63ca8c58fece5daa731ed9108d487ab081f724c95 WatchSource:0}: Error finding container 494858f3e9b3a44720fe80a63ca8c58fece5daa731ed9108d487ab081f724c95: Status 404 returned error can't find the container with id 494858f3e9b3a44720fe80a63ca8c58fece5daa731ed9108d487ab081f724c95 Nov 25 09:05:17 crc kubenswrapper[4932]: E1125 09:05:17.272273 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-r2qvp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-7c57c8bbc4-cwqvg_openstack-operators(243ff257-9836-4e43-9228-e05f18282650): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:05:17 crc kubenswrapper[4932]: E1125 09:05:17.273769 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" podUID="243ff257-9836-4e43-9228-e05f18282650" Nov 25 09:05:17 crc kubenswrapper[4932]: E1125 09:05:17.304752 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-djfj5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5db546f9d9-4wph8_openstack-operators(8c014265-53e2-4c4d-9c25-452686712f2e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:05:17 crc kubenswrapper[4932]: E1125 09:05:17.306463 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" podUID="8c014265-53e2-4c4d-9c25-452686712f2e" Nov 25 09:05:17 crc kubenswrapper[4932]: E1125 09:05:17.330757 4932 
kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cqnsm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-68b95954c9-x4l6r_openstack-operators(bde38973-f401-4917-8abc-08dafaf8f10c): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:05:17 crc kubenswrapper[4932]: E1125 09:05:17.331940 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" podUID="bde38973-f401-4917-8abc-08dafaf8f10c" Nov 25 09:05:17 crc kubenswrapper[4932]: E1125 09:05:17.349390 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bc474,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
manila-operator-controller-manager-58bb8d67cc-bbmvf_openstack-operators(e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 09:05:17 crc kubenswrapper[4932]: E1125 09:05:17.351606 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" podUID="e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d" Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.739910 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" event={"ID":"6c508686-35cb-4c09-8ee6-2c655072d7d3","Type":"ContainerStarted","Data":"f2e08c041a5b63861edc4c34aa13c2ff84a27624dabf1fff8e84b842e5639925"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.741460 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" event={"ID":"6fcca084-72cb-48ba-948f-6c4d861f6096","Type":"ContainerStarted","Data":"8d7f2ae2f105799891d00e8724d9b7be9af07072f86d9c09ab7398bdd31c6fcd"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.742665 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" event={"ID":"12f70ae4-14e2-4eed-9c1d-29e380a6d757","Type":"ContainerStarted","Data":"670b5f67265dd16f161391e043545e897bb14e0d60e0124adb4c2c0d4a01a98e"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.761466 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" event={"ID":"bde38973-f401-4917-8abc-08dafaf8f10c","Type":"ContainerStarted","Data":"72fadcec50c8804239125f2a46e1c8627697eca76284b96988eb75e0a322d238"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.762341 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" Nov 25 09:05:17 crc kubenswrapper[4932]: E1125 09:05:17.788941 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" podUID="bde38973-f401-4917-8abc-08dafaf8f10c" Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.792336 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" event={"ID":"243ff257-9836-4e43-9228-e05f18282650","Type":"ContainerStarted","Data":"620fdc212be8c877dc6a1da7ca0be213975cdbd9c958c8187bbb631b299eb7b0"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.793039 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" Nov 25 09:05:17 crc kubenswrapper[4932]: E1125 09:05:17.796151 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" podUID="243ff257-9836-4e43-9228-e05f18282650" Nov 25 09:05:17 crc 
kubenswrapper[4932]: I1125 09:05:17.797546 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" event={"ID":"8c014265-53e2-4c4d-9c25-452686712f2e","Type":"ContainerStarted","Data":"edb768adb7ff4cf663301d1b8c996188c33db672b1babf3414708b15e6d1259d"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.798104 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" Nov 25 09:05:17 crc kubenswrapper[4932]: E1125 09:05:17.798645 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" podUID="8c014265-53e2-4c4d-9c25-452686712f2e" Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.805809 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" event={"ID":"d2216d92-9e2d-4549-b634-63ec3ada9f14","Type":"ContainerStarted","Data":"fc9cb63539261e590f698e85996887fd7cac7bbff2609d3a62115b725ab712de"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.817819 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" event={"ID":"dae34761-581e-4f65-8d7c-d6c2d302b4f7","Type":"ContainerStarted","Data":"1374e9de03345c43f46af7f7cde3289869915310569e550bc673641cdfb01c6d"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.821322 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" event={"ID":"65fb5603-367e-431f-a8d3-0a3281a70361","Type":"ContainerStarted","Data":"a122f3f5d243279d93b07ffeaacd6c6d1cc73f4a0d88687eeaa2ef201d308db1"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.839629 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" event={"ID":"e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d","Type":"ContainerStarted","Data":"dfb5b7ed45088edf33bf7ff338fb1a32825aceda9374ead836206731c65d6034"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.840346 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.843661 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" event={"ID":"070a395c-8ac5-4303-80fb-7f93282a9f99","Type":"ContainerStarted","Data":"c64b72dae063c07f045dfc23b33e2462f956b03d635d6f660129577b2d2e6066"} Nov 25 09:05:17 crc kubenswrapper[4932]: E1125 09:05:17.844328 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" podUID="e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d" Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.844892 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" 
event={"ID":"96d031ad-3550-4423-9422-93911c9a8217","Type":"ContainerStarted","Data":"e085beb3bbc4f9973ec8d9f647374f6a5724be783eb3ad3796a5cc06e35a1200"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.846115 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" event={"ID":"66b0ef7a-14c8-4702-8e52-67809a677880","Type":"ContainerStarted","Data":"d67a4e59bfeba476e11186a332f647f8f5a5a7dd1d6445ad889dd9cdc0be3480"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.846137 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" event={"ID":"66b0ef7a-14c8-4702-8e52-67809a677880","Type":"ContainerStarted","Data":"494858f3e9b3a44720fe80a63ca8c58fece5daa731ed9108d487ab081f724c95"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.846663 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.850662 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" event={"ID":"6dedf441-145d-4642-a0f0-fb691d2edd2d","Type":"ContainerStarted","Data":"f9d685fb12af160c9b6f06cc5130b3243cdad011655cade2637093fe044c0f38"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.853119 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" event={"ID":"af96b4c7-e9eb-4609-afab-ba3cc15f0a48","Type":"ContainerStarted","Data":"d76a49c47f51346babbace38abde4866130679b37e8786aa1c6aaf826f3af8b1"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.855298 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" event={"ID":"a92ad4a6-d922-45c1-b02d-f382b1ea1cc0","Type":"ContainerStarted","Data":"722efff839e6dcd8a6d64e9325b7e83a9e3c202123e0da6c24ae3f271cb1dfce"} Nov 25 09:05:17 crc kubenswrapper[4932]: I1125 09:05:17.856972 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" event={"ID":"bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd","Type":"ContainerStarted","Data":"6bbe52121c4b99bd1b164c8815329292b7368a45d866693592376057fe50d359"} Nov 25 09:05:18 crc kubenswrapper[4932]: I1125 09:05:18.026706 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" podStartSLOduration=12.0266898 podStartE2EDuration="12.0266898s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:05:18.017617414 +0000 UTC m=+978.143646977" watchObservedRunningTime="2025-11-25 09:05:18.0266898 +0000 UTC m=+978.152719363" Nov 25 09:05:18 crc kubenswrapper[4932]: E1125 09:05:18.871173 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" podUID="243ff257-9836-4e43-9228-e05f18282650" Nov 25 09:05:18 crc kubenswrapper[4932]: E1125 09:05:18.872713 4932 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" podUID="8c014265-53e2-4c4d-9c25-452686712f2e" Nov 25 09:05:18 crc kubenswrapper[4932]: E1125 09:05:18.873404 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" podUID="bde38973-f401-4917-8abc-08dafaf8f10c" Nov 25 09:05:18 crc kubenswrapper[4932]: E1125 09:05:18.873467 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" podUID="e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d" Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.884508 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" event={"ID":"6c508686-35cb-4c09-8ee6-2c655072d7d3","Type":"ContainerStarted","Data":"2032894e00f1c9c21236170ec561ddceca3856c5741bc20e620e6fd8c8c2f920"} Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.885109 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" event={"ID":"6c508686-35cb-4c09-8ee6-2c655072d7d3","Type":"ContainerStarted","Data":"721d0fdc5df3659f4d894e79772aabcb99cd77528ae02221faa20f6c47d48bd2"} Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.885155 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.887306 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" event={"ID":"af96b4c7-e9eb-4609-afab-ba3cc15f0a48","Type":"ContainerStarted","Data":"9278fc0fc48ea084a0e00ae05540ef99a499c2bfa9f2c72dd72d3de8fd6e63a3"} Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.887441 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.890164 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" event={"ID":"dae34761-581e-4f65-8d7c-d6c2d302b4f7","Type":"ContainerStarted","Data":"0d7ef1c558d58e41af2761e0f4632575de096d93751c05059f66ea10f9b11740"} Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.890335 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.892801 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" 
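
By 09:05:18 the picture is mixed: most operator containers have started, while several kube-rbac-proxy containers still cycle through "Back-off pulling image" after the earlier QPS rejections. When triaging a log like this, a short filter that tallies ImagePullBackOff hits per pod is quicker than reading the interleaved entries by eye; the regex below is written against the exact pod="namespace/name" attribute format shown above (the kubelet.log filename matches this file, adjust as needed):

    import re
    from collections import Counter

    # Count ImagePullBackOff occurrences per pod in a kubelet journal dump.
    # Matches the unescaped pod="namespace/name" attribute that closes the
    # "Error syncing pod" entries above.
    pat = re.compile(r'ImagePullBackOff.*?pod="([^"]+)"')

    counts = Counter()
    with open("kubelet.log") as f:
        for line in f:
            counts.update(pat.findall(line))

    for pod, n in counts.most_common(10):
        print(f"{n:4d}  {pod}")
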
event={"ID":"070a395c-8ac5-4303-80fb-7f93282a9f99","Type":"ContainerStarted","Data":"96b17cdd40c276b99f9762d38bfb1dcc67bc42762ef8369096426e4163185719"} Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.893235 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.923315 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" podStartSLOduration=11.35443628 podStartE2EDuration="14.923294675s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:16.757694061 +0000 UTC m=+976.883723624" lastFinishedPulling="2025-11-25 09:05:20.326552456 +0000 UTC m=+980.452582019" observedRunningTime="2025-11-25 09:05:20.920756193 +0000 UTC m=+981.046785766" watchObservedRunningTime="2025-11-25 09:05:20.923294675 +0000 UTC m=+981.049324238" Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.939254 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" podStartSLOduration=2.453133409 podStartE2EDuration="14.939228704s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:08.078666343 +0000 UTC m=+968.204695906" lastFinishedPulling="2025-11-25 09:05:20.564761638 +0000 UTC m=+980.690791201" observedRunningTime="2025-11-25 09:05:20.93411213 +0000 UTC m=+981.060141693" watchObservedRunningTime="2025-11-25 09:05:20.939228704 +0000 UTC m=+981.065258267" Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.956274 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" podStartSLOduration=2.273629787 podStartE2EDuration="14.956252474s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:07.933803768 +0000 UTC m=+968.059833331" lastFinishedPulling="2025-11-25 09:05:20.616426445 +0000 UTC m=+980.742456018" observedRunningTime="2025-11-25 09:05:20.950308327 +0000 UTC m=+981.076337910" watchObservedRunningTime="2025-11-25 09:05:20.956252474 +0000 UTC m=+981.082282037" Nov 25 09:05:20 crc kubenswrapper[4932]: I1125 09:05:20.977085 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" podStartSLOduration=2.600809055 podStartE2EDuration="14.977067242s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:08.059050978 +0000 UTC m=+968.185080541" lastFinishedPulling="2025-11-25 09:05:20.435309165 +0000 UTC m=+980.561338728" observedRunningTime="2025-11-25 09:05:20.972537874 +0000 UTC m=+981.098567437" watchObservedRunningTime="2025-11-25 09:05:20.977067242 +0000 UTC m=+981.103096815" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.911014 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" event={"ID":"bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd","Type":"ContainerStarted","Data":"8f34859bf2610400f05969bc8a3ea29fe1c4bc9715ed64e1cecc1f83d501a7d1"} Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.911222 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 
09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.913876 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" event={"ID":"6fcca084-72cb-48ba-948f-6c4d861f6096","Type":"ContainerStarted","Data":"709ba9332d8c94c09f558fdea4ff92f4c3b69ff1b4d502cee93d384087e7ac40"} Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.914155 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.916362 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" event={"ID":"12f70ae4-14e2-4eed-9c1d-29e380a6d757","Type":"ContainerStarted","Data":"81dc492d97062a939345545056b80805e6f55913cda44802511065f6c012b2f2"} Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.916416 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.918424 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" event={"ID":"d2216d92-9e2d-4549-b634-63ec3ada9f14","Type":"ContainerStarted","Data":"b0d244b0fad22f792c8286cf5d3a1fd6da9ef4ad045b1ec3e8f3ae198a8e3a91"} Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.918593 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.919023 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.920672 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" event={"ID":"65fb5603-367e-431f-a8d3-0a3281a70361","Type":"ContainerStarted","Data":"8be3aa7931c6fe662d0030da9bebe85cd10da68a7c4016b9ae7e6d8cdd21336b"} Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.920979 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.922706 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" event={"ID":"96d031ad-3550-4423-9422-93911c9a8217","Type":"ContainerStarted","Data":"7ee37ff8c61bcdcefcf17e18a1e3062fe50e40897366fac893545bbee3fdf6ed"} Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.922838 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.922920 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.923087 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.926713 4932 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" podStartSLOduration=2.766704423 podStartE2EDuration="15.926696548s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:07.599037874 +0000 UTC m=+967.725067437" lastFinishedPulling="2025-11-25 09:05:20.759029999 +0000 UTC m=+980.885059562" observedRunningTime="2025-11-25 09:05:21.924874267 +0000 UTC m=+982.050903850" watchObservedRunningTime="2025-11-25 09:05:21.926696548 +0000 UTC m=+982.052726111" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.929047 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" event={"ID":"a92ad4a6-d922-45c1-b02d-f382b1ea1cc0","Type":"ContainerStarted","Data":"32c30765d29592d2e1f5cdedd703b00595f717b98c8f6634d4058b599d37551b"} Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.930166 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.933778 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.935437 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" event={"ID":"6dedf441-145d-4642-a0f0-fb691d2edd2d","Type":"ContainerStarted","Data":"0038d5b8bbc55f157ae47a4329767744482db94c22342ba6b342ab2538245528"} Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.937899 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.978655 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" podStartSLOduration=2.5220962719999998 podStartE2EDuration="15.978633214s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:07.807516108 +0000 UTC m=+967.933545671" lastFinishedPulling="2025-11-25 09:05:21.26405305 +0000 UTC m=+981.390082613" observedRunningTime="2025-11-25 09:05:21.952400414 +0000 UTC m=+982.078429987" watchObservedRunningTime="2025-11-25 09:05:21.978633214 +0000 UTC m=+982.104662777" Nov 25 09:05:21 crc kubenswrapper[4932]: I1125 09:05:21.981521 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" podStartSLOduration=2.738573847 podStartE2EDuration="15.981505545s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:07.941098594 +0000 UTC m=+968.067128157" lastFinishedPulling="2025-11-25 09:05:21.184030302 +0000 UTC m=+981.310059855" observedRunningTime="2025-11-25 09:05:21.981437393 +0000 UTC m=+982.107466966" watchObservedRunningTime="2025-11-25 09:05:21.981505545 +0000 UTC m=+982.107535108" Nov 25 09:05:22 crc kubenswrapper[4932]: I1125 09:05:22.012615 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" podStartSLOduration=3.0184129 podStartE2EDuration="16.012588132s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" 
firstStartedPulling="2025-11-25 09:05:08.066673074 +0000 UTC m=+968.192702637" lastFinishedPulling="2025-11-25 09:05:21.060848306 +0000 UTC m=+981.186877869" observedRunningTime="2025-11-25 09:05:22.006318475 +0000 UTC m=+982.132348068" watchObservedRunningTime="2025-11-25 09:05:22.012588132 +0000 UTC m=+982.138617705" Nov 25 09:05:22 crc kubenswrapper[4932]: I1125 09:05:22.036708 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" podStartSLOduration=1.9361262780000001 podStartE2EDuration="16.036690292s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:07.280605681 +0000 UTC m=+967.406635244" lastFinishedPulling="2025-11-25 09:05:21.381169695 +0000 UTC m=+981.507199258" observedRunningTime="2025-11-25 09:05:22.030434416 +0000 UTC m=+982.156463989" watchObservedRunningTime="2025-11-25 09:05:22.036690292 +0000 UTC m=+982.162719855" Nov 25 09:05:22 crc kubenswrapper[4932]: I1125 09:05:22.055462 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" podStartSLOduration=2.934558792 podStartE2EDuration="16.055443371s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:07.836487417 +0000 UTC m=+967.962516980" lastFinishedPulling="2025-11-25 09:05:20.957371996 +0000 UTC m=+981.083401559" observedRunningTime="2025-11-25 09:05:22.04974188 +0000 UTC m=+982.175771443" watchObservedRunningTime="2025-11-25 09:05:22.055443371 +0000 UTC m=+982.181472934" Nov 25 09:05:22 crc kubenswrapper[4932]: I1125 09:05:22.112693 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" podStartSLOduration=2.873899095 podStartE2EDuration="16.112669986s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:07.925785611 +0000 UTC m=+968.051815174" lastFinishedPulling="2025-11-25 09:05:21.164556492 +0000 UTC m=+981.290586065" observedRunningTime="2025-11-25 09:05:22.107914552 +0000 UTC m=+982.233944115" watchObservedRunningTime="2025-11-25 09:05:22.112669986 +0000 UTC m=+982.238699559" Nov 25 09:05:22 crc kubenswrapper[4932]: I1125 09:05:22.126835 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" podStartSLOduration=2.619510706 podStartE2EDuration="16.126805395s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:07.354289624 +0000 UTC m=+967.480319187" lastFinishedPulling="2025-11-25 09:05:20.861584303 +0000 UTC m=+980.987613876" observedRunningTime="2025-11-25 09:05:22.122674208 +0000 UTC m=+982.248703781" watchObservedRunningTime="2025-11-25 09:05:22.126805395 +0000 UTC m=+982.252834968" Nov 25 09:05:22 crc kubenswrapper[4932]: I1125 09:05:22.947254 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" Nov 25 09:05:22 crc kubenswrapper[4932]: I1125 09:05:22.950645 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" Nov 25 09:05:22 crc kubenswrapper[4932]: I1125 09:05:22.950717 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
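
The pod_startup_latency_tracker entries above make the two startup metrics explicit: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, while podStartSLOduration additionally subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling), following the Kubernetes pod-startup SLI convention of excluding pull time. The cinder-operator entry checks out exactly:

    # Figures copied from the cinder-operator-controller-manager entry above,
    # using the monotonic m=+... offsets (seconds).
    first_started_pulling = 967.725067437
    last_finished_pulling = 980.885059562
    e2e = 15.926696548                    # podStartE2EDuration

    pull_window = last_finished_pulling - first_started_pulling
    slo = e2e - pull_window
    print(f"image-pull window: {pull_window:.9f}s")  # 13.159992125
    print(f"SLO duration:      {slo:.9f}s")          # 2.766704423, matching
    # podStartSLOduration=2.766704423 in the log entry above.
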
pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 09:05:22 crc kubenswrapper[4932]: I1125 09:05:22.951969 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" Nov 25 09:05:22 crc kubenswrapper[4932]: I1125 09:05:22.952751 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" Nov 25 09:05:22 crc kubenswrapper[4932]: I1125 09:05:22.953840 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" Nov 25 09:05:25 crc kubenswrapper[4932]: I1125 09:05:25.975156 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" event={"ID":"3aadd9b8-da59-45e3-979b-ac4896561d6c","Type":"ContainerStarted","Data":"c8ab4115cd41bb1ce7e7d23045f95f7d210eaea00b7026a3c55d0d2c593dfa17"} Nov 25 09:05:25 crc kubenswrapper[4932]: I1125 09:05:25.978671 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" event={"ID":"695ce8a3-6a30-42a4-8ba2-f6309470362c","Type":"ContainerStarted","Data":"486c1f17e94d617e3e0daf3f196112ae9c290336e9a050cd7125e0ef942fc0f5"} Nov 25 09:05:25 crc kubenswrapper[4932]: I1125 09:05:25.981750 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" event={"ID":"d4860edf-9f45-4dd2-8e35-7c3a4444370a","Type":"ContainerStarted","Data":"a82748d48467f19e2ec49734076e60948c2d035072487bd0205e49acac781dc4"} Nov 25 09:05:26 crc kubenswrapper[4932]: I1125 09:05:26.834486 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" Nov 25 09:05:26 crc kubenswrapper[4932]: I1125 09:05:26.965541 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" Nov 25 09:05:26 crc kubenswrapper[4932]: I1125 09:05:26.997321 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" event={"ID":"695ce8a3-6a30-42a4-8ba2-f6309470362c","Type":"ContainerStarted","Data":"3ef0f4bdf0e96b7707908072c897473890e099ebb45baebb36b4bfbdffe34413"} Nov 25 09:05:26 crc kubenswrapper[4932]: I1125 09:05:26.997454 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" Nov 25 09:05:27 crc kubenswrapper[4932]: I1125 09:05:27.003216 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" event={"ID":"d4860edf-9f45-4dd2-8e35-7c3a4444370a","Type":"ContainerStarted","Data":"7cff1bb5d90f6ac896f72571ac105aaab98a6aa6cdc74233cd4ec909c472fc70"} Nov 25 09:05:27 crc kubenswrapper[4932]: I1125 09:05:27.004389 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" Nov 25 09:05:27 crc kubenswrapper[4932]: I1125 09:05:27.006418 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" 
event={"ID":"3aadd9b8-da59-45e3-979b-ac4896561d6c","Type":"ContainerStarted","Data":"28cc73d864fdf6306393da7935a629fa265d920a729bb0255d854c22d8252858"} Nov 25 09:05:27 crc kubenswrapper[4932]: I1125 09:05:27.006561 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" Nov 25 09:05:27 crc kubenswrapper[4932]: I1125 09:05:27.017740 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" podStartSLOduration=3.717720051 podStartE2EDuration="21.017716814s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:08.409497696 +0000 UTC m=+968.535527259" lastFinishedPulling="2025-11-25 09:05:25.709494459 +0000 UTC m=+985.835524022" observedRunningTime="2025-11-25 09:05:27.01475685 +0000 UTC m=+987.140786423" watchObservedRunningTime="2025-11-25 09:05:27.017716814 +0000 UTC m=+987.143746407" Nov 25 09:05:27 crc kubenswrapper[4932]: I1125 09:05:27.035091 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" podStartSLOduration=3.637217152 podStartE2EDuration="21.035072893s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:08.308892091 +0000 UTC m=+968.434921654" lastFinishedPulling="2025-11-25 09:05:25.706747832 +0000 UTC m=+985.832777395" observedRunningTime="2025-11-25 09:05:27.033065487 +0000 UTC m=+987.159095060" watchObservedRunningTime="2025-11-25 09:05:27.035072893 +0000 UTC m=+987.161102456" Nov 25 09:05:27 crc kubenswrapper[4932]: I1125 09:05:27.046383 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" Nov 25 09:05:27 crc kubenswrapper[4932]: I1125 09:05:27.056753 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" podStartSLOduration=3.646574656 podStartE2EDuration="21.056732255s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:08.291901021 +0000 UTC m=+968.417930584" lastFinishedPulling="2025-11-25 09:05:25.70205862 +0000 UTC m=+985.828088183" observedRunningTime="2025-11-25 09:05:27.052894446 +0000 UTC m=+987.178924019" watchObservedRunningTime="2025-11-25 09:05:27.056732255 +0000 UTC m=+987.182761818" Nov 25 09:05:27 crc kubenswrapper[4932]: I1125 09:05:27.123258 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" Nov 25 09:05:27 crc kubenswrapper[4932]: I1125 09:05:27.234673 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" Nov 25 09:05:30 crc kubenswrapper[4932]: I1125 09:05:30.670903 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" Nov 25 09:05:31 crc kubenswrapper[4932]: I1125 09:05:31.261476 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" Nov 25 09:05:37 crc kubenswrapper[4932]: I1125 09:05:37.468028 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" Nov 25 09:05:37 crc kubenswrapper[4932]: E1125 09:05:37.490292 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f" Nov 25 09:05:37 crc kubenswrapper[4932]: E1125 09:05:37.490502 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9zj8k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-864885998-pkjjd_openstack-operators(45ebb480-733b-47a3-a682-8fe0be16eb78): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:05:37 crc kubenswrapper[4932]: I1125 09:05:37.495531 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" Nov 25 09:05:37 crc kubenswrapper[4932]: I1125 09:05:37.580616 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" Nov 25 09:05:38 crc kubenswrapper[4932]: E1125 09:05:38.932047 4932 log.go:32] "PullImage from image service failed" err="rpc error: 
code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Nov 25 09:05:38 crc kubenswrapper[4932]: E1125 09:05:38.932481 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jgccj,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-bwv87_openstack-operators(1b5af146-d2d1-4526-8a10-84ebc35baca8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:05:38 crc kubenswrapper[4932]: E1125 09:05:38.934135 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" podUID="1b5af146-d2d1-4526-8a10-84ebc35baca8" Nov 25 09:05:39 crc kubenswrapper[4932]: E1125 09:05:39.223213 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" podUID="45ebb480-733b-47a3-a682-8fe0be16eb78" Nov 25 09:05:40 crc kubenswrapper[4932]: I1125 09:05:40.105320 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" event={"ID":"e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d","Type":"ContainerStarted","Data":"9e6bdabe24cf1101607da88ea6fba88c799f4a652b72cd85f5683292bc08b641"} Nov 25 09:05:40 
crc kubenswrapper[4932]: I1125 09:05:40.107933 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" event={"ID":"bde38973-f401-4917-8abc-08dafaf8f10c","Type":"ContainerStarted","Data":"33b3add3ba9017852e4f63c1498c110417183a34f9654157fb883ae2675311ce"} Nov 25 09:05:40 crc kubenswrapper[4932]: I1125 09:05:40.111399 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" event={"ID":"243ff257-9836-4e43-9228-e05f18282650","Type":"ContainerStarted","Data":"4040767cc976646c0b9a1492a76a46b3e58b31be42bf241bc8bcdb44b87957e4"} Nov 25 09:05:40 crc kubenswrapper[4932]: I1125 09:05:40.114460 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" event={"ID":"765f296f-cd42-4f2c-9b21-2bcbc65d490c","Type":"ContainerStarted","Data":"05eb188ef3301cf070062279fdd63b26de6a199e8036c5adf815bc1b8565d765"} Nov 25 09:05:40 crc kubenswrapper[4932]: I1125 09:05:40.114539 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" event={"ID":"765f296f-cd42-4f2c-9b21-2bcbc65d490c","Type":"ContainerStarted","Data":"aa560081a8fee703b3ea68394c811794ee09b408bd97475698ae11c8a3440f5e"} Nov 25 09:05:40 crc kubenswrapper[4932]: I1125 09:05:40.114796 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 09:05:40 crc kubenswrapper[4932]: I1125 09:05:40.117686 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" event={"ID":"45ebb480-733b-47a3-a682-8fe0be16eb78","Type":"ContainerStarted","Data":"df63bb2fc27fc99fc61e3c50c4ea9483231a944461c7bd52e2cb09f5ce31891a"} Nov 25 09:05:40 crc kubenswrapper[4932]: I1125 09:05:40.120183 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" event={"ID":"8c014265-53e2-4c4d-9c25-452686712f2e","Type":"ContainerStarted","Data":"9bc9c956efa098129e753bbb3b5ac3dde42db432b5b7b1d50e4c93bc3047b21c"} Nov 25 09:05:40 crc kubenswrapper[4932]: E1125 09:05:40.120588 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" podUID="45ebb480-733b-47a3-a682-8fe0be16eb78" Nov 25 09:05:40 crc kubenswrapper[4932]: I1125 09:05:40.136245 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" podStartSLOduration=25.852980045 podStartE2EDuration="34.136224865s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:08.285679415 +0000 UTC m=+968.411708978" lastFinishedPulling="2025-11-25 09:05:16.568924235 +0000 UTC m=+976.694953798" observedRunningTime="2025-11-25 09:05:40.130166524 +0000 UTC m=+1000.256196077" watchObservedRunningTime="2025-11-25 09:05:40.136224865 +0000 UTC m=+1000.262254438" Nov 25 09:05:40 crc kubenswrapper[4932]: I1125 09:05:40.153217 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" podStartSLOduration=3.533329481 podStartE2EDuration="34.153200984s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:08.291951782 +0000 UTC m=+968.417981345" lastFinishedPulling="2025-11-25 09:05:38.911823285 +0000 UTC m=+999.037852848" observedRunningTime="2025-11-25 09:05:40.152435283 +0000 UTC m=+1000.278464846" watchObservedRunningTime="2025-11-25 09:05:40.153200984 +0000 UTC m=+1000.279230547" Nov 25 09:05:40 crc kubenswrapper[4932]: I1125 09:05:40.194673 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" podStartSLOduration=25.427574686 podStartE2EDuration="34.194652534s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:07.801271251 +0000 UTC m=+967.927300814" lastFinishedPulling="2025-11-25 09:05:16.568349099 +0000 UTC m=+976.694378662" observedRunningTime="2025-11-25 09:05:40.187470211 +0000 UTC m=+1000.313499764" watchObservedRunningTime="2025-11-25 09:05:40.194652534 +0000 UTC m=+1000.320682097" Nov 25 09:05:40 crc kubenswrapper[4932]: I1125 09:05:40.229128 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" podStartSLOduration=25.881633013 podStartE2EDuration="34.229102715s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:08.288866885 +0000 UTC m=+968.414896448" lastFinishedPulling="2025-11-25 09:05:16.636336587 +0000 UTC m=+976.762366150" observedRunningTime="2025-11-25 09:05:40.216077969 +0000 UTC m=+1000.342107542" watchObservedRunningTime="2025-11-25 09:05:40.229102715 +0000 UTC m=+1000.355132278" Nov 25 09:05:40 crc kubenswrapper[4932]: I1125 09:05:40.238129 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" podStartSLOduration=25.897371456 podStartE2EDuration="34.238114409s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:08.281762144 +0000 UTC m=+968.407791707" lastFinishedPulling="2025-11-25 09:05:16.622505087 +0000 UTC m=+976.748534660" observedRunningTime="2025-11-25 09:05:40.237824401 +0000 UTC m=+1000.363853964" watchObservedRunningTime="2025-11-25 09:05:40.238114409 +0000 UTC m=+1000.364143972" Nov 25 09:05:47 crc kubenswrapper[4932]: I1125 09:05:47.259683 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 09:05:49 crc kubenswrapper[4932]: E1125 09:05:49.610206 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" podUID="1b5af146-d2d1-4526-8a10-84ebc35baca8" Nov 25 09:05:53 crc kubenswrapper[4932]: E1125 09:05:53.609942 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:4838402d41d42c56613d43dc5041aae475a2b18e6172491d6c4d4a78a580697f\\\"\"" 
pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" podUID="45ebb480-733b-47a3-a682-8fe0be16eb78" Nov 25 09:06:00 crc kubenswrapper[4932]: I1125 09:06:00.613318 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:06:05 crc kubenswrapper[4932]: I1125 09:06:05.329057 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" event={"ID":"1b5af146-d2d1-4526-8a10-84ebc35baca8","Type":"ContainerStarted","Data":"3f5b0313d6e96c29af56708f8b962ed783dbe6ab8c69c7a4e6d6b4f5f04ae7b1"} Nov 25 09:06:05 crc kubenswrapper[4932]: I1125 09:06:05.348562 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" podStartSLOduration=2.330554933 podStartE2EDuration="58.348543586s" podCreationTimestamp="2025-11-25 09:05:07 +0000 UTC" firstStartedPulling="2025-11-25 09:05:08.402158948 +0000 UTC m=+968.528188511" lastFinishedPulling="2025-11-25 09:06:04.420147601 +0000 UTC m=+1024.546177164" observedRunningTime="2025-11-25 09:06:05.342738373 +0000 UTC m=+1025.468767926" watchObservedRunningTime="2025-11-25 09:06:05.348543586 +0000 UTC m=+1025.474573149" Nov 25 09:06:06 crc kubenswrapper[4932]: I1125 09:06:06.336649 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" event={"ID":"45ebb480-733b-47a3-a682-8fe0be16eb78","Type":"ContainerStarted","Data":"41061e6d0c38484f6b609924cf9a1c5a91be7d691bcc15aed58391a019a70189"} Nov 25 09:06:06 crc kubenswrapper[4932]: I1125 09:06:06.338255 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" Nov 25 09:06:17 crc kubenswrapper[4932]: I1125 09:06:17.652553 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" Nov 25 09:06:17 crc kubenswrapper[4932]: I1125 09:06:17.675329 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" podStartSLOduration=14.173221368 podStartE2EDuration="1m11.675303637s" podCreationTimestamp="2025-11-25 09:05:06 +0000 UTC" firstStartedPulling="2025-11-25 09:05:08.398426083 +0000 UTC m=+968.524455646" lastFinishedPulling="2025-11-25 09:06:05.900508352 +0000 UTC m=+1026.026537915" observedRunningTime="2025-11-25 09:06:06.355082739 +0000 UTC m=+1026.481112302" watchObservedRunningTime="2025-11-25 09:06:17.675303637 +0000 UTC m=+1037.801333230" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.650328 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-4mjtq"] Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.653435 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.660510 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.660638 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.661341 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-cqswh" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.661551 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.671343 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-4mjtq"] Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.729802 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6584b49599-htq59"] Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.731156 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.732889 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.747377 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-htq59"] Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.806172 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7q4j\" (UniqueName: \"kubernetes.io/projected/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-kube-api-access-p7q4j\") pod \"dnsmasq-dns-6584b49599-htq59\" (UID: \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\") " pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.806253 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-dns-svc\") pod \"dnsmasq-dns-6584b49599-htq59\" (UID: \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\") " pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.806367 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ca7360e-6db1-4a86-a7f5-c99741b6c847-config\") pod \"dnsmasq-dns-7bdd77c89-4mjtq\" (UID: \"2ca7360e-6db1-4a86-a7f5-c99741b6c847\") " pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.806405 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6d87\" (UniqueName: \"kubernetes.io/projected/2ca7360e-6db1-4a86-a7f5-c99741b6c847-kube-api-access-w6d87\") pod \"dnsmasq-dns-7bdd77c89-4mjtq\" (UID: \"2ca7360e-6db1-4a86-a7f5-c99741b6c847\") " pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.806435 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-config\") pod \"dnsmasq-dns-6584b49599-htq59\" (UID: \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\") " 
pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.907509 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ca7360e-6db1-4a86-a7f5-c99741b6c847-config\") pod \"dnsmasq-dns-7bdd77c89-4mjtq\" (UID: \"2ca7360e-6db1-4a86-a7f5-c99741b6c847\") " pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.907562 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6d87\" (UniqueName: \"kubernetes.io/projected/2ca7360e-6db1-4a86-a7f5-c99741b6c847-kube-api-access-w6d87\") pod \"dnsmasq-dns-7bdd77c89-4mjtq\" (UID: \"2ca7360e-6db1-4a86-a7f5-c99741b6c847\") " pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.907597 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-config\") pod \"dnsmasq-dns-6584b49599-htq59\" (UID: \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\") " pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.907621 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7q4j\" (UniqueName: \"kubernetes.io/projected/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-kube-api-access-p7q4j\") pod \"dnsmasq-dns-6584b49599-htq59\" (UID: \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\") " pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.907658 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-dns-svc\") pod \"dnsmasq-dns-6584b49599-htq59\" (UID: \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\") " pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.908538 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ca7360e-6db1-4a86-a7f5-c99741b6c847-config\") pod \"dnsmasq-dns-7bdd77c89-4mjtq\" (UID: \"2ca7360e-6db1-4a86-a7f5-c99741b6c847\") " pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.908569 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-dns-svc\") pod \"dnsmasq-dns-6584b49599-htq59\" (UID: \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\") " pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.908623 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-config\") pod \"dnsmasq-dns-6584b49599-htq59\" (UID: \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\") " pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.932469 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7q4j\" (UniqueName: \"kubernetes.io/projected/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-kube-api-access-p7q4j\") pod \"dnsmasq-dns-6584b49599-htq59\" (UID: \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\") " pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.932567 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-w6d87\" (UniqueName: \"kubernetes.io/projected/2ca7360e-6db1-4a86-a7f5-c99741b6c847-kube-api-access-w6d87\") pod \"dnsmasq-dns-7bdd77c89-4mjtq\" (UID: \"2ca7360e-6db1-4a86-a7f5-c99741b6c847\") " pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" Nov 25 09:06:31 crc kubenswrapper[4932]: I1125 09:06:31.972928 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" Nov 25 09:06:32 crc kubenswrapper[4932]: I1125 09:06:32.047814 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:32 crc kubenswrapper[4932]: I1125 09:06:32.555103 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-4mjtq"] Nov 25 09:06:32 crc kubenswrapper[4932]: I1125 09:06:32.620405 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-htq59"] Nov 25 09:06:32 crc kubenswrapper[4932]: W1125 09:06:32.629259 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd7f1ac4_6492_4fde_ac5b_1dc04db598a8.slice/crio-8c257c40454cbffb399a57c9984dab18e5222e2e5df01413343368a94b1794c8 WatchSource:0}: Error finding container 8c257c40454cbffb399a57c9984dab18e5222e2e5df01413343368a94b1794c8: Status 404 returned error can't find the container with id 8c257c40454cbffb399a57c9984dab18e5222e2e5df01413343368a94b1794c8 Nov 25 09:06:33 crc kubenswrapper[4932]: I1125 09:06:33.539833 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" event={"ID":"2ca7360e-6db1-4a86-a7f5-c99741b6c847","Type":"ContainerStarted","Data":"9edd74e2b02b6c7c00ebb9de2923a76021e7a5f48fe9f60444593a79c28883fd"} Nov 25 09:06:33 crc kubenswrapper[4932]: I1125 09:06:33.542965 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-htq59" event={"ID":"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8","Type":"ContainerStarted","Data":"8c257c40454cbffb399a57c9984dab18e5222e2e5df01413343368a94b1794c8"} Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.435919 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-htq59"] Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.468895 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-xvf9s"] Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.472961 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.483846 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-xvf9s"] Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.559890 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79ba2d28-9208-4051-a300-d8b48a88aafe-config\") pod \"dnsmasq-dns-7c6d9948dc-xvf9s\" (UID: \"79ba2d28-9208-4051-a300-d8b48a88aafe\") " pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.559930 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km22k\" (UniqueName: \"kubernetes.io/projected/79ba2d28-9208-4051-a300-d8b48a88aafe-kube-api-access-km22k\") pod \"dnsmasq-dns-7c6d9948dc-xvf9s\" (UID: \"79ba2d28-9208-4051-a300-d8b48a88aafe\") " pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.559969 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79ba2d28-9208-4051-a300-d8b48a88aafe-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-xvf9s\" (UID: \"79ba2d28-9208-4051-a300-d8b48a88aafe\") " pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.661711 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79ba2d28-9208-4051-a300-d8b48a88aafe-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-xvf9s\" (UID: \"79ba2d28-9208-4051-a300-d8b48a88aafe\") " pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.661951 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79ba2d28-9208-4051-a300-d8b48a88aafe-config\") pod \"dnsmasq-dns-7c6d9948dc-xvf9s\" (UID: \"79ba2d28-9208-4051-a300-d8b48a88aafe\") " pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.661973 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km22k\" (UniqueName: \"kubernetes.io/projected/79ba2d28-9208-4051-a300-d8b48a88aafe-kube-api-access-km22k\") pod \"dnsmasq-dns-7c6d9948dc-xvf9s\" (UID: \"79ba2d28-9208-4051-a300-d8b48a88aafe\") " pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.663798 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79ba2d28-9208-4051-a300-d8b48a88aafe-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-xvf9s\" (UID: \"79ba2d28-9208-4051-a300-d8b48a88aafe\") " pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.667491 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79ba2d28-9208-4051-a300-d8b48a88aafe-config\") pod \"dnsmasq-dns-7c6d9948dc-xvf9s\" (UID: \"79ba2d28-9208-4051-a300-d8b48a88aafe\") " pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.708049 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km22k\" (UniqueName: 
\"kubernetes.io/projected/79ba2d28-9208-4051-a300-d8b48a88aafe-kube-api-access-km22k\") pod \"dnsmasq-dns-7c6d9948dc-xvf9s\" (UID: \"79ba2d28-9208-4051-a300-d8b48a88aafe\") " pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.803128 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.807948 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-4mjtq"] Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.891256 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-k44zw"] Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.892590 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.898263 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-k44zw"] Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.978646 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f744988-197a-4134-8488-549633bf4dc8-dns-svc\") pod \"dnsmasq-dns-6486446b9f-k44zw\" (UID: \"8f744988-197a-4134-8488-549633bf4dc8\") " pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.979027 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfpnh\" (UniqueName: \"kubernetes.io/projected/8f744988-197a-4134-8488-549633bf4dc8-kube-api-access-gfpnh\") pod \"dnsmasq-dns-6486446b9f-k44zw\" (UID: \"8f744988-197a-4134-8488-549633bf4dc8\") " pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:06:34 crc kubenswrapper[4932]: I1125 09:06:34.979059 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f744988-197a-4134-8488-549633bf4dc8-config\") pod \"dnsmasq-dns-6486446b9f-k44zw\" (UID: \"8f744988-197a-4134-8488-549633bf4dc8\") " pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.080012 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f744988-197a-4134-8488-549633bf4dc8-dns-svc\") pod \"dnsmasq-dns-6486446b9f-k44zw\" (UID: \"8f744988-197a-4134-8488-549633bf4dc8\") " pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.080176 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfpnh\" (UniqueName: \"kubernetes.io/projected/8f744988-197a-4134-8488-549633bf4dc8-kube-api-access-gfpnh\") pod \"dnsmasq-dns-6486446b9f-k44zw\" (UID: \"8f744988-197a-4134-8488-549633bf4dc8\") " pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.080227 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f744988-197a-4134-8488-549633bf4dc8-config\") pod \"dnsmasq-dns-6486446b9f-k44zw\" (UID: \"8f744988-197a-4134-8488-549633bf4dc8\") " pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.081334 4932 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f744988-197a-4134-8488-549633bf4dc8-config\") pod \"dnsmasq-dns-6486446b9f-k44zw\" (UID: \"8f744988-197a-4134-8488-549633bf4dc8\") " pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.081674 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f744988-197a-4134-8488-549633bf4dc8-dns-svc\") pod \"dnsmasq-dns-6486446b9f-k44zw\" (UID: \"8f744988-197a-4134-8488-549633bf4dc8\") " pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.115147 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfpnh\" (UniqueName: \"kubernetes.io/projected/8f744988-197a-4134-8488-549633bf4dc8-kube-api-access-gfpnh\") pod \"dnsmasq-dns-6486446b9f-k44zw\" (UID: \"8f744988-197a-4134-8488-549633bf4dc8\") " pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.253301 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-xvf9s"] Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.284560 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.582572 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" event={"ID":"79ba2d28-9208-4051-a300-d8b48a88aafe","Type":"ContainerStarted","Data":"29cd03945a44073d2256a7d3d449db2e7a0a4dc26a47d01965ff07ff3a4be283"} Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.617266 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.618875 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.620524 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.620747 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-6fbjr" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.621034 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.622570 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.630826 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.631082 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.631238 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.631497 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.692369 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/969d317e-0787-44a8-8e27-554b0e887444-pod-info\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.692424 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/969d317e-0787-44a8-8e27-554b0e887444-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.692448 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpgft\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-kube-api-access-xpgft\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.692505 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.692529 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.692547 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-server-conf\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.692573 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.692627 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.692658 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.692683 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.692701 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.764028 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-k44zw"] Nov 25 09:06:35 crc kubenswrapper[4932]: W1125 09:06:35.779340 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8f744988_197a_4134_8488_549633bf4dc8.slice/crio-943d01e8056ceb7457860de82384f76c9d78b5387400bf462f5112111882b20f WatchSource:0}: Error finding container 943d01e8056ceb7457860de82384f76c9d78b5387400bf462f5112111882b20f: Status 404 returned error can't find the container with id 943d01e8056ceb7457860de82384f76c9d78b5387400bf462f5112111882b20f Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.793849 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.793891 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 
crc kubenswrapper[4932]: I1125 09:06:35.793909 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-server-conf\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.793930 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.793971 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.793991 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.794020 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.794038 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.794068 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/969d317e-0787-44a8-8e27-554b0e887444-pod-info\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.794090 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/969d317e-0787-44a8-8e27-554b0e887444-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.794106 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpgft\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-kube-api-access-xpgft\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.794633 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod 
\"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.796647 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.796869 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.800784 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/969d317e-0787-44a8-8e27-554b0e887444-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.801121 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.803926 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.810083 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpgft\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-kube-api-access-xpgft\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.842271 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-server-conf\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.843729 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.844539 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.851301 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.851942 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/969d317e-0787-44a8-8e27-554b0e887444-pod-info\") pod \"rabbitmq-server-0\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " pod="openstack/rabbitmq-server-0" Nov 25 09:06:35 crc kubenswrapper[4932]: I1125 09:06:35.958710 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.028113 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.029391 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.029469 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.034716 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.034716 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.034812 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.034841 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.034993 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.035689 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.035832 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-n8lbs" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.100577 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.100640 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.100669 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f41b25a4-f48e-4938-9c23-0d89751af6ae-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.100702 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f41b25a4-f48e-4938-9c23-0d89751af6ae-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.100731 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.100774 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zljvp\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-kube-api-access-zljvp\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.100806 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.100821 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.100855 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.100873 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.100894 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.203150 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-server-conf\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.203218 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.203245 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f41b25a4-f48e-4938-9c23-0d89751af6ae-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.203283 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f41b25a4-f48e-4938-9c23-0d89751af6ae-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.203318 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.203343 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zljvp\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-kube-api-access-zljvp\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.203374 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.203394 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.203455 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.203480 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: 
I1125 09:06:36.203501 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.203523 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.204321 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.205135 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.205991 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.206512 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.208144 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.209239 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.209544 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f41b25a4-f48e-4938-9c23-0d89751af6ae-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.216443 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/f41b25a4-f48e-4938-9c23-0d89751af6ae-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.216648 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.227296 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zljvp\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-kube-api-access-zljvp\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.245808 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.363505 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.518006 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.598920 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" event={"ID":"8f744988-197a-4134-8488-549633bf4dc8","Type":"ContainerStarted","Data":"943d01e8056ceb7457860de82384f76c9d78b5387400bf462f5112111882b20f"} Nov 25 09:06:36 crc kubenswrapper[4932]: I1125 09:06:36.640255 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.110049 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.111546 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.113422 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.115481 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.115654 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.115758 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-82gjf" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.123381 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.124327 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.221909 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.221975 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-kolla-config\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.222005 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8134265d-9da9-4607-8db8-98330608ba4c-config-data-generated\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.222029 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8134265d-9da9-4607-8db8-98330608ba4c-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.222072 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrdct\" (UniqueName: \"kubernetes.io/projected/8134265d-9da9-4607-8db8-98330608ba4c-kube-api-access-rrdct\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.222106 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-operator-scripts\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.222128 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8134265d-9da9-4607-8db8-98330608ba4c-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.222274 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-config-data-default\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.323478 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-config-data-default\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.323540 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.323565 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-kolla-config\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.323584 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8134265d-9da9-4607-8db8-98330608ba4c-config-data-generated\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.323600 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8134265d-9da9-4607-8db8-98330608ba4c-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.323634 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrdct\" (UniqueName: \"kubernetes.io/projected/8134265d-9da9-4607-8db8-98330608ba4c-kube-api-access-rrdct\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.323658 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-operator-scripts\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.323673 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8134265d-9da9-4607-8db8-98330608ba4c-galera-tls-certs\") pod \"openstack-galera-0\" (UID: 
\"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.324831 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8134265d-9da9-4607-8db8-98330608ba4c-config-data-generated\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.325640 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-config-data-default\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.325819 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.330005 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-kolla-config\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.331014 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-operator-scripts\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.338731 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8134265d-9da9-4607-8db8-98330608ba4c-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.355781 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8134265d-9da9-4607-8db8-98330608ba4c-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.358634 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrdct\" (UniqueName: \"kubernetes.io/projected/8134265d-9da9-4607-8db8-98330608ba4c-kube-api-access-rrdct\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.365209 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " pod="openstack/openstack-galera-0" Nov 25 09:06:37 crc kubenswrapper[4932]: I1125 09:06:37.436960 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.284034 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.285309 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.287362 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-xz8ql" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.287517 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.288507 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.288668 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.292350 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.351465 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdmb4\" (UniqueName: \"kubernetes.io/projected/2023df73-6a92-4838-8d5e-31f533796950-kube-api-access-jdmb4\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.351542 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2023df73-6a92-4838-8d5e-31f533796950-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.351631 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.351672 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.351697 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.353890 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2023df73-6a92-4838-8d5e-31f533796950-combined-ca-bundle\") pod 
\"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.353962 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2023df73-6a92-4838-8d5e-31f533796950-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.354019 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.456229 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.456275 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.456326 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.456691 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2023df73-6a92-4838-8d5e-31f533796950-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.456735 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2023df73-6a92-4838-8d5e-31f533796950-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.456752 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.456784 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdmb4\" (UniqueName: \"kubernetes.io/projected/2023df73-6a92-4838-8d5e-31f533796950-kube-api-access-jdmb4\") pod \"openstack-cell1-galera-0\" (UID: 
\"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.456800 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2023df73-6a92-4838-8d5e-31f533796950-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.456833 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.457112 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.457513 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2023df73-6a92-4838-8d5e-31f533796950-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.458753 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.459502 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.462504 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2023df73-6a92-4838-8d5e-31f533796950-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.465035 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2023df73-6a92-4838-8d5e-31f533796950-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.473600 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdmb4\" (UniqueName: \"kubernetes.io/projected/2023df73-6a92-4838-8d5e-31f533796950-kube-api-access-jdmb4\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc 
kubenswrapper[4932]: I1125 09:06:38.478081 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-cell1-galera-0\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.613658 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.738925 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.740533 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.744048 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.745706 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.746062 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-9ng6c" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.759617 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.760932 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc680bc2-b240-40b6-b77e-c0d264f283b3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.760995 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cc680bc2-b240-40b6-b77e-c0d264f283b3-config-data\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.761045 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc680bc2-b240-40b6-b77e-c0d264f283b3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.761066 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5d4k\" (UniqueName: \"kubernetes.io/projected/cc680bc2-b240-40b6-b77e-c0d264f283b3-kube-api-access-n5d4k\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.761109 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc680bc2-b240-40b6-b77e-c0d264f283b3-kolla-config\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.862147 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/cc680bc2-b240-40b6-b77e-c0d264f283b3-kolla-config\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.862267 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc680bc2-b240-40b6-b77e-c0d264f283b3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.862311 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cc680bc2-b240-40b6-b77e-c0d264f283b3-config-data\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.862351 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc680bc2-b240-40b6-b77e-c0d264f283b3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.862369 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5d4k\" (UniqueName: \"kubernetes.io/projected/cc680bc2-b240-40b6-b77e-c0d264f283b3-kube-api-access-n5d4k\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.863038 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cc680bc2-b240-40b6-b77e-c0d264f283b3-config-data\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.863158 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc680bc2-b240-40b6-b77e-c0d264f283b3-kolla-config\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.867283 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc680bc2-b240-40b6-b77e-c0d264f283b3-memcached-tls-certs\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.867660 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc680bc2-b240-40b6-b77e-c0d264f283b3-combined-ca-bundle\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:38 crc kubenswrapper[4932]: I1125 09:06:38.890831 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5d4k\" (UniqueName: \"kubernetes.io/projected/cc680bc2-b240-40b6-b77e-c0d264f283b3-kube-api-access-n5d4k\") pod \"memcached-0\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " pod="openstack/memcached-0" Nov 25 09:06:39 crc kubenswrapper[4932]: I1125 09:06:39.067168 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 25 09:06:40 crc kubenswrapper[4932]: I1125 09:06:40.455050 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:06:40 crc kubenswrapper[4932]: I1125 09:06:40.455975 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:06:40 crc kubenswrapper[4932]: I1125 09:06:40.457892 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-s5hd2" Nov 25 09:06:40 crc kubenswrapper[4932]: I1125 09:06:40.469435 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:06:40 crc kubenswrapper[4932]: I1125 09:06:40.488288 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqz6s\" (UniqueName: \"kubernetes.io/projected/5183d0c7-226f-4f06-9687-82b0c0269a5d-kube-api-access-zqz6s\") pod \"kube-state-metrics-0\" (UID: \"5183d0c7-226f-4f06-9687-82b0c0269a5d\") " pod="openstack/kube-state-metrics-0" Nov 25 09:06:40 crc kubenswrapper[4932]: I1125 09:06:40.589838 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqz6s\" (UniqueName: \"kubernetes.io/projected/5183d0c7-226f-4f06-9687-82b0c0269a5d-kube-api-access-zqz6s\") pod \"kube-state-metrics-0\" (UID: \"5183d0c7-226f-4f06-9687-82b0c0269a5d\") " pod="openstack/kube-state-metrics-0" Nov 25 09:06:40 crc kubenswrapper[4932]: I1125 09:06:40.605860 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqz6s\" (UniqueName: \"kubernetes.io/projected/5183d0c7-226f-4f06-9687-82b0c0269a5d-kube-api-access-zqz6s\") pod \"kube-state-metrics-0\" (UID: \"5183d0c7-226f-4f06-9687-82b0c0269a5d\") " pod="openstack/kube-state-metrics-0" Nov 25 09:06:40 crc kubenswrapper[4932]: I1125 09:06:40.772339 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:06:40 crc kubenswrapper[4932]: W1125 09:06:40.856485 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod969d317e_0787_44a8_8e27_554b0e887444.slice/crio-bd3ec1f25bd3e898954df337b54dcdb3d5f65e6189da8a9adbc37a2150d81e01 WatchSource:0}: Error finding container bd3ec1f25bd3e898954df337b54dcdb3d5f65e6189da8a9adbc37a2150d81e01: Status 404 returned error can't find the container with id bd3ec1f25bd3e898954df337b54dcdb3d5f65e6189da8a9adbc37a2150d81e01 Nov 25 09:06:41 crc kubenswrapper[4932]: I1125 09:06:41.662483 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f41b25a4-f48e-4938-9c23-0d89751af6ae","Type":"ContainerStarted","Data":"ebad3f3a2d349b5e6bc3b27737e7776609760d823c543b9b8d9f2ca7bba7d372"} Nov 25 09:06:41 crc kubenswrapper[4932]: I1125 09:06:41.665017 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"969d317e-0787-44a8-8e27-554b0e887444","Type":"ContainerStarted","Data":"bd3ec1f25bd3e898954df337b54dcdb3d5f65e6189da8a9adbc37a2150d81e01"} Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.812645 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.814458 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.818155 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.818260 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-mnrwz" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.818343 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.818498 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.818608 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.819852 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.853057 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.853463 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5373bec8-828a-4e9b-b0fd-6a0ef84375de-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.853497 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.853519 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5373bec8-828a-4e9b-b0fd-6a0ef84375de-config\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.853539 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5373bec8-828a-4e9b-b0fd-6a0ef84375de-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.853557 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.853571 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.853591 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44krg\" (UniqueName: \"kubernetes.io/projected/5373bec8-828a-4e9b-b0fd-6a0ef84375de-kube-api-access-44krg\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.955019 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5373bec8-828a-4e9b-b0fd-6a0ef84375de-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.955079 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.955104 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5373bec8-828a-4e9b-b0fd-6a0ef84375de-config\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.955122 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5373bec8-828a-4e9b-b0fd-6a0ef84375de-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.955146 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.955162 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.955182 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44krg\" (UniqueName: \"kubernetes.io/projected/5373bec8-828a-4e9b-b0fd-6a0ef84375de-kube-api-access-44krg\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.955243 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 
09:06:43.956351 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5373bec8-828a-4e9b-b0fd-6a0ef84375de-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.956626 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5373bec8-828a-4e9b-b0fd-6a0ef84375de-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.956887 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.969027 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5373bec8-828a-4e9b-b0fd-6a0ef84375de-config\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.969675 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.977957 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.983852 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:43 crc kubenswrapper[4932]: I1125 09:06:43.997365 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44krg\" (UniqueName: \"kubernetes.io/projected/5373bec8-828a-4e9b-b0fd-6a0ef84375de-kube-api-access-44krg\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:44 crc kubenswrapper[4932]: I1125 09:06:44.016414 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:44 crc kubenswrapper[4932]: I1125 09:06:44.139422 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.170304 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-c26qd"] Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.171577 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.173421 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-9hkvj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.173666 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.173810 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.182291 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-c26qd"] Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.224535 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-drcqj"] Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.226558 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.238039 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-drcqj"] Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.275316 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b15edfd7-749d-45a4-9801-1eba98d77a5e-combined-ca-bundle\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.275389 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b15edfd7-749d-45a4-9801-1eba98d77a5e-ovn-controller-tls-certs\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.275425 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b15edfd7-749d-45a4-9801-1eba98d77a5e-scripts\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.275458 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-run-ovn\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.275506 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-run\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc 
kubenswrapper[4932]: I1125 09:06:45.275577 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-log-ovn\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.275607 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbqzt\" (UniqueName: \"kubernetes.io/projected/b15edfd7-749d-45a4-9801-1eba98d77a5e-kube-api-access-hbqzt\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.377886 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftg54\" (UniqueName: \"kubernetes.io/projected/257c86ab-2577-4d46-bdb3-1ec56da0d21e-kube-api-access-ftg54\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.377981 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-run\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378065 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-lib\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378100 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-log\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378136 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbqzt\" (UniqueName: \"kubernetes.io/projected/b15edfd7-749d-45a4-9801-1eba98d77a5e-kube-api-access-hbqzt\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378207 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b15edfd7-749d-45a4-9801-1eba98d77a5e-scripts\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378235 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-run-ovn\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378274 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-run\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378307 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-etc-ovs\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378331 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/257c86ab-2577-4d46-bdb3-1ec56da0d21e-scripts\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378363 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-log-ovn\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378397 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b15edfd7-749d-45a4-9801-1eba98d77a5e-combined-ca-bundle\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378432 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b15edfd7-749d-45a4-9801-1eba98d77a5e-ovn-controller-tls-certs\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378683 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-run\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378751 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-run-ovn\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.378771 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-log-ovn\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.381721 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b15edfd7-749d-45a4-9801-1eba98d77a5e-scripts\") pod \"ovn-controller-c26qd\" (UID: 
\"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.384593 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b15edfd7-749d-45a4-9801-1eba98d77a5e-ovn-controller-tls-certs\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.385525 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b15edfd7-749d-45a4-9801-1eba98d77a5e-combined-ca-bundle\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.397139 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbqzt\" (UniqueName: \"kubernetes.io/projected/b15edfd7-749d-45a4-9801-1eba98d77a5e-kube-api-access-hbqzt\") pod \"ovn-controller-c26qd\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") " pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.480492 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/257c86ab-2577-4d46-bdb3-1ec56da0d21e-scripts\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.480901 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftg54\" (UniqueName: \"kubernetes.io/projected/257c86ab-2577-4d46-bdb3-1ec56da0d21e-kube-api-access-ftg54\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.480943 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-lib\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.480977 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-log\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.481053 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-run\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.481088 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-etc-ovs\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.481505 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-etc-ovs\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.481540 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-lib\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.481619 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-log\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.481674 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-run\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.483210 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/257c86ab-2577-4d46-bdb3-1ec56da0d21e-scripts\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.507002 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c26qd" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.507903 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftg54\" (UniqueName: \"kubernetes.io/projected/257c86ab-2577-4d46-bdb3-1ec56da0d21e-kube-api-access-ftg54\") pod \"ovn-controller-ovs-drcqj\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:45 crc kubenswrapper[4932]: I1125 09:06:45.587639 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.345138 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.349275 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.351251 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.353502 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.353582 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.353698 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-9ng5s" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.355882 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.440872 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.440933 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.441092 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.441150 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.441264 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.441302 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-config\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.441358 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dj7zg\" (UniqueName: \"kubernetes.io/projected/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-kube-api-access-dj7zg\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " 
pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.441515 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.542917 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.542970 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.542994 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.543089 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.543128 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.543174 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.543304 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-config\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.543500 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.543637 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.544038 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dj7zg\" (UniqueName: \"kubernetes.io/projected/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-kube-api-access-dj7zg\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.544670 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-config\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.545046 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.551177 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.551631 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.551788 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.561389 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dj7zg\" (UniqueName: \"kubernetes.io/projected/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-kube-api-access-dj7zg\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.571167 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:48 crc kubenswrapper[4932]: I1125 09:06:48.678158 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 09:06:53 crc kubenswrapper[4932]: E1125 09:06:53.438053 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 25 09:06:53 crc kubenswrapper[4932]: E1125 09:06:53.439496 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-km22k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7c6d9948dc-xvf9s_openstack(79ba2d28-9208-4051-a300-d8b48a88aafe): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:06:53 crc kubenswrapper[4932]: E1125 09:06:53.440665 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" podUID="79ba2d28-9208-4051-a300-d8b48a88aafe" Nov 25 09:06:53 crc kubenswrapper[4932]: E1125 09:06:53.440674 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 25 09:06:53 
crc kubenswrapper[4932]: E1125 09:06:53.440828 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w6d87,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7bdd77c89-4mjtq_openstack(2ca7360e-6db1-4a86-a7f5-c99741b6c847): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:06:53 crc kubenswrapper[4932]: E1125 09:06:53.442008 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" podUID="2ca7360e-6db1-4a86-a7f5-c99741b6c847" Nov 25 09:06:53 crc kubenswrapper[4932]: I1125 09:06:53.643017 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 09:06:53 crc kubenswrapper[4932]: E1125 09:06:53.648963 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 25 09:06:53 crc kubenswrapper[4932]: E1125 09:06:53.649151 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts 
--keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p7q4j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-6584b49599-htq59_openstack(cd7f1ac4-6492-4fde-ac5b-1dc04db598a8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:06:53 crc kubenswrapper[4932]: E1125 09:06:53.650390 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-6584b49599-htq59" podUID="cd7f1ac4-6492-4fde-ac5b-1dc04db598a8" Nov 25 09:06:53 crc kubenswrapper[4932]: E1125 09:06:53.774156 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba\\\"\"" pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" podUID="79ba2d28-9208-4051-a300-d8b48a88aafe" Nov 25 09:06:54 crc kubenswrapper[4932]: W1125 09:06:54.797994 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2023df73_6a92_4838_8d5e_31f533796950.slice/crio-2b75d501b48082379bb24cd31682856a5721c062d0cf04aeaffea55f2ea1210e WatchSource:0}: Error finding container 2b75d501b48082379bb24cd31682856a5721c062d0cf04aeaffea55f2ea1210e: Status 404 returned error can't find the container with id 2b75d501b48082379bb24cd31682856a5721c062d0cf04aeaffea55f2ea1210e Nov 25 09:06:54 crc kubenswrapper[4932]: I1125 09:06:54.981373 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:54 crc kubenswrapper[4932]: I1125 09:06:54.995639 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.058184 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-dns-svc\") pod \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\" (UID: \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\") " Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.058308 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-config\") pod \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\" (UID: \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\") " Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.058412 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p7q4j\" (UniqueName: \"kubernetes.io/projected/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-kube-api-access-p7q4j\") pod \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\" (UID: \"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8\") " Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.058825 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cd7f1ac4-6492-4fde-ac5b-1dc04db598a8" (UID: "cd7f1ac4-6492-4fde-ac5b-1dc04db598a8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.058958 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-config" (OuterVolumeSpecName: "config") pod "cd7f1ac4-6492-4fde-ac5b-1dc04db598a8" (UID: "cd7f1ac4-6492-4fde-ac5b-1dc04db598a8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.062941 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-kube-api-access-p7q4j" (OuterVolumeSpecName: "kube-api-access-p7q4j") pod "cd7f1ac4-6492-4fde-ac5b-1dc04db598a8" (UID: "cd7f1ac4-6492-4fde-ac5b-1dc04db598a8"). InnerVolumeSpecName "kube-api-access-p7q4j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.160430 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ca7360e-6db1-4a86-a7f5-c99741b6c847-config\") pod \"2ca7360e-6db1-4a86-a7f5-c99741b6c847\" (UID: \"2ca7360e-6db1-4a86-a7f5-c99741b6c847\") " Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.160861 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6d87\" (UniqueName: \"kubernetes.io/projected/2ca7360e-6db1-4a86-a7f5-c99741b6c847-kube-api-access-w6d87\") pod \"2ca7360e-6db1-4a86-a7f5-c99741b6c847\" (UID: \"2ca7360e-6db1-4a86-a7f5-c99741b6c847\") " Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.161122 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ca7360e-6db1-4a86-a7f5-c99741b6c847-config" (OuterVolumeSpecName: "config") pod "2ca7360e-6db1-4a86-a7f5-c99741b6c847" (UID: "2ca7360e-6db1-4a86-a7f5-c99741b6c847"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.161550 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.161650 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.161722 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ca7360e-6db1-4a86-a7f5-c99741b6c847-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.161792 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p7q4j\" (UniqueName: \"kubernetes.io/projected/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8-kube-api-access-p7q4j\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.267935 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.363764 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ca7360e-6db1-4a86-a7f5-c99741b6c847-kube-api-access-w6d87" (OuterVolumeSpecName: "kube-api-access-w6d87") pod "2ca7360e-6db1-4a86-a7f5-c99741b6c847" (UID: "2ca7360e-6db1-4a86-a7f5-c99741b6c847"). InnerVolumeSpecName "kube-api-access-w6d87". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.364738 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6d87\" (UniqueName: \"kubernetes.io/projected/2ca7360e-6db1-4a86-a7f5-c99741b6c847-kube-api-access-w6d87\") on node \"crc\" DevicePath \"\"" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.675021 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-c26qd"] Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.683762 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 09:06:55 crc kubenswrapper[4932]: W1125 09:06:55.771816 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8134265d_9da9_4607_8db8_98330608ba4c.slice/crio-3127d6ce8a76318742eeaa12b888412bcee21bd03ffca649914f7968657cb7a9 WatchSource:0}: Error finding container 3127d6ce8a76318742eeaa12b888412bcee21bd03ffca649914f7968657cb7a9: Status 404 returned error can't find the container with id 3127d6ce8a76318742eeaa12b888412bcee21bd03ffca649914f7968657cb7a9 Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.782611 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c26qd" event={"ID":"b15edfd7-749d-45a4-9801-1eba98d77a5e","Type":"ContainerStarted","Data":"e3c0671e83e049c98e6dea9592babef2511da601289ef9e98206d771e6678a14"} Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.784852 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"8134265d-9da9-4607-8db8-98330608ba4c","Type":"ContainerStarted","Data":"3127d6ce8a76318742eeaa12b888412bcee21bd03ffca649914f7968657cb7a9"} Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.791856 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"cc680bc2-b240-40b6-b77e-c0d264f283b3","Type":"ContainerStarted","Data":"881b0256ab688de0f995a1e9c73f2413abdd38cd31f47c48990007e37cd74934"} Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.797239 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2023df73-6a92-4838-8d5e-31f533796950","Type":"ContainerStarted","Data":"2b75d501b48082379bb24cd31682856a5721c062d0cf04aeaffea55f2ea1210e"} Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.798336 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.798874 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-htq59" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.798906 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-htq59" event={"ID":"cd7f1ac4-6492-4fde-ac5b-1dc04db598a8","Type":"ContainerDied","Data":"8c257c40454cbffb399a57c9984dab18e5222e2e5df01413343368a94b1794c8"} Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.800981 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.804898 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-4mjtq" event={"ID":"2ca7360e-6db1-4a86-a7f5-c99741b6c847","Type":"ContainerDied","Data":"9edd74e2b02b6c7c00ebb9de2923a76021e7a5f48fe9f60444593a79c28883fd"} Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.810314 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.821574 4932 generic.go:334] "Generic (PLEG): container finished" podID="8f744988-197a-4134-8488-549633bf4dc8" containerID="3e07e73eace2087474ef6a2d649630b938e7539e202ebb160423573f98959e58" exitCode=0 Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.821618 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" event={"ID":"8f744988-197a-4134-8488-549633bf4dc8","Type":"ContainerDied","Data":"3e07e73eace2087474ef6a2d649630b938e7539e202ebb160423573f98959e58"} Nov 25 09:06:55 crc kubenswrapper[4932]: W1125 09:06:55.834916 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4bc00f1_7938_42cf_9e1b_3bd8b4b6d7ce.slice/crio-8082e61f9afe6b88dfe1a633a0d10565d2bcd3ef76138c30a9abdc556141a656 WatchSource:0}: Error finding container 8082e61f9afe6b88dfe1a633a0d10565d2bcd3ef76138c30a9abdc556141a656: Status 404 returned error can't find the container with id 8082e61f9afe6b88dfe1a633a0d10565d2bcd3ef76138c30a9abdc556141a656 Nov 25 09:06:55 crc kubenswrapper[4932]: I1125 09:06:55.899948 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:55.999754 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-drcqj"] Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.033031 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-4mjtq"] Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.040617 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-4mjtq"] Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.058450 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-htq59"] Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.069575 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-htq59"] Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.616778 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ca7360e-6db1-4a86-a7f5-c99741b6c847" path="/var/lib/kubelet/pods/2ca7360e-6db1-4a86-a7f5-c99741b6c847/volumes" Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.618036 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd7f1ac4-6492-4fde-ac5b-1dc04db598a8" path="/var/lib/kubelet/pods/cd7f1ac4-6492-4fde-ac5b-1dc04db598a8/volumes" Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.836554 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce","Type":"ContainerStarted","Data":"8082e61f9afe6b88dfe1a633a0d10565d2bcd3ef76138c30a9abdc556141a656"} Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.838874 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/kube-state-metrics-0" event={"ID":"5183d0c7-226f-4f06-9687-82b0c0269a5d","Type":"ContainerStarted","Data":"a98689e4c03aa93ce7b0b315d7b88b982013d69640532d6faa686912be1e18b4"} Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.840077 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-drcqj" event={"ID":"257c86ab-2577-4d46-bdb3-1ec56da0d21e","Type":"ContainerStarted","Data":"03fd4299e59567672b47d27cf05ce679f7e2241bbc02e1ffda50ff705571054d"} Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.843039 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f41b25a4-f48e-4938-9c23-0d89751af6ae","Type":"ContainerStarted","Data":"cea9b269376b0126100e6463c531d8bcf8908546bb4a1fa00b8f14257389b126"} Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.845356 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5373bec8-828a-4e9b-b0fd-6a0ef84375de","Type":"ContainerStarted","Data":"c2bf490555c3e5cccee9eaf846bab33c361726b160f9da6ba6fa94375404b147"} Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.849179 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" event={"ID":"8f744988-197a-4134-8488-549633bf4dc8","Type":"ContainerStarted","Data":"2cd45f3f3d10c7197bb58b86d44b7ff2436d7c858de6d530c4fbebcb731920c6"} Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.849505 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.850647 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"969d317e-0787-44a8-8e27-554b0e887444","Type":"ContainerStarted","Data":"a6512388461b5ae27e35163ae39ba5b6a1c60f287ae5af46bd5dc2ae88208f31"} Nov 25 09:06:56 crc kubenswrapper[4932]: I1125 09:06:56.890712 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" podStartSLOduration=3.751135433 podStartE2EDuration="22.889371642s" podCreationTimestamp="2025-11-25 09:06:34 +0000 UTC" firstStartedPulling="2025-11-25 09:06:35.782382523 +0000 UTC m=+1055.908412086" lastFinishedPulling="2025-11-25 09:06:54.920618732 +0000 UTC m=+1075.046648295" observedRunningTime="2025-11-25 09:06:56.886057451 +0000 UTC m=+1077.012087024" watchObservedRunningTime="2025-11-25 09:06:56.889371642 +0000 UTC m=+1077.015401205" Nov 25 09:07:02 crc kubenswrapper[4932]: I1125 09:07:02.891963 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2023df73-6a92-4838-8d5e-31f533796950","Type":"ContainerStarted","Data":"29ea220a319155118ee72222ef7879f0c6f85a5fbe3eeb194a6b2229582758e2"} Nov 25 09:07:02 crc kubenswrapper[4932]: I1125 09:07:02.894170 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5373bec8-828a-4e9b-b0fd-6a0ef84375de","Type":"ContainerStarted","Data":"1b7fb306f61206aff751cf1adbf835164dd03eeceaa44f76421e2b0575c75592"} Nov 25 09:07:02 crc kubenswrapper[4932]: I1125 09:07:02.895478 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-drcqj" event={"ID":"257c86ab-2577-4d46-bdb3-1ec56da0d21e","Type":"ContainerStarted","Data":"b3d7606c018f09548171e8fb637356b80b8ace12213f992a7cbed584f9881e91"} Nov 25 09:07:02 crc kubenswrapper[4932]: I1125 09:07:02.897683 4932 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c26qd" event={"ID":"b15edfd7-749d-45a4-9801-1eba98d77a5e","Type":"ContainerStarted","Data":"4bdba0a0070629dc89bd75eb2cd967b02a72c0ae20ab32bae70fe717dc0a8d8d"} Nov 25 09:07:02 crc kubenswrapper[4932]: I1125 09:07:02.897801 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-c26qd" Nov 25 09:07:02 crc kubenswrapper[4932]: I1125 09:07:02.899556 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce","Type":"ContainerStarted","Data":"cd839751f73e93f33c82ece92bdaf68a46775b2428ad48d61c20238e06cf889d"} Nov 25 09:07:02 crc kubenswrapper[4932]: I1125 09:07:02.900747 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5183d0c7-226f-4f06-9687-82b0c0269a5d","Type":"ContainerStarted","Data":"e9ffc1df49b2e958be7f09a6213f1511293f7f7b12bb6eeba3a2baf502b8d076"} Nov 25 09:07:02 crc kubenswrapper[4932]: I1125 09:07:02.900867 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 09:07:02 crc kubenswrapper[4932]: I1125 09:07:02.902243 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"8134265d-9da9-4607-8db8-98330608ba4c","Type":"ContainerStarted","Data":"659410f3e179673e519464b18c0db2561436e5f7e651c66d2f2dc7308a27ecbd"} Nov 25 09:07:02 crc kubenswrapper[4932]: I1125 09:07:02.909036 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"cc680bc2-b240-40b6-b77e-c0d264f283b3","Type":"ContainerStarted","Data":"24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e"} Nov 25 09:07:02 crc kubenswrapper[4932]: I1125 09:07:02.909357 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 25 09:07:02 crc kubenswrapper[4932]: I1125 09:07:02.959821 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=16.392786109 podStartE2EDuration="22.959787343s" podCreationTimestamp="2025-11-25 09:06:40 +0000 UTC" firstStartedPulling="2025-11-25 09:06:55.820640832 +0000 UTC m=+1075.946670395" lastFinishedPulling="2025-11-25 09:07:02.387642066 +0000 UTC m=+1082.513671629" observedRunningTime="2025-11-25 09:07:02.957852854 +0000 UTC m=+1083.083882417" watchObservedRunningTime="2025-11-25 09:07:02.959787343 +0000 UTC m=+1083.085816906" Nov 25 09:07:03 crc kubenswrapper[4932]: I1125 09:07:03.003541 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-c26qd" podStartSLOduration=11.386817577 podStartE2EDuration="18.003519687s" podCreationTimestamp="2025-11-25 09:06:45 +0000 UTC" firstStartedPulling="2025-11-25 09:06:55.771266726 +0000 UTC m=+1075.897296289" lastFinishedPulling="2025-11-25 09:07:02.387968836 +0000 UTC m=+1082.513998399" observedRunningTime="2025-11-25 09:07:02.998612737 +0000 UTC m=+1083.124642310" watchObservedRunningTime="2025-11-25 09:07:03.003519687 +0000 UTC m=+1083.129549250" Nov 25 09:07:03 crc kubenswrapper[4932]: I1125 09:07:03.022129 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=22.386188107 podStartE2EDuration="25.022092754s" podCreationTimestamp="2025-11-25 09:06:38 +0000 UTC" firstStartedPulling="2025-11-25 09:06:55.369596841 +0000 UTC m=+1075.495626404" 
lastFinishedPulling="2025-11-25 09:06:58.005501488 +0000 UTC m=+1078.131531051" observedRunningTime="2025-11-25 09:07:03.014658207 +0000 UTC m=+1083.140687770" watchObservedRunningTime="2025-11-25 09:07:03.022092754 +0000 UTC m=+1083.148122317" Nov 25 09:07:03 crc kubenswrapper[4932]: I1125 09:07:03.918935 4932 generic.go:334] "Generic (PLEG): container finished" podID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerID="b3d7606c018f09548171e8fb637356b80b8ace12213f992a7cbed584f9881e91" exitCode=0 Nov 25 09:07:03 crc kubenswrapper[4932]: I1125 09:07:03.919037 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-drcqj" event={"ID":"257c86ab-2577-4d46-bdb3-1ec56da0d21e","Type":"ContainerDied","Data":"b3d7606c018f09548171e8fb637356b80b8ace12213f992a7cbed584f9881e91"} Nov 25 09:07:04 crc kubenswrapper[4932]: I1125 09:07:04.929892 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-drcqj" event={"ID":"257c86ab-2577-4d46-bdb3-1ec56da0d21e","Type":"ContainerStarted","Data":"120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99"} Nov 25 09:07:05 crc kubenswrapper[4932]: I1125 09:07:05.286391 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:07:05 crc kubenswrapper[4932]: I1125 09:07:05.343612 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-xvf9s"] Nov 25 09:07:05 crc kubenswrapper[4932]: I1125 09:07:05.943690 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:07:05 crc kubenswrapper[4932]: I1125 09:07:05.959335 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79ba2d28-9208-4051-a300-d8b48a88aafe-config\") pod \"79ba2d28-9208-4051-a300-d8b48a88aafe\" (UID: \"79ba2d28-9208-4051-a300-d8b48a88aafe\") " Nov 25 09:07:05 crc kubenswrapper[4932]: I1125 09:07:05.960845 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79ba2d28-9208-4051-a300-d8b48a88aafe-dns-svc\") pod \"79ba2d28-9208-4051-a300-d8b48a88aafe\" (UID: \"79ba2d28-9208-4051-a300-d8b48a88aafe\") " Nov 25 09:07:05 crc kubenswrapper[4932]: I1125 09:07:05.961022 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-km22k\" (UniqueName: \"kubernetes.io/projected/79ba2d28-9208-4051-a300-d8b48a88aafe-kube-api-access-km22k\") pod \"79ba2d28-9208-4051-a300-d8b48a88aafe\" (UID: \"79ba2d28-9208-4051-a300-d8b48a88aafe\") " Nov 25 09:07:05 crc kubenswrapper[4932]: I1125 09:07:05.961399 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79ba2d28-9208-4051-a300-d8b48a88aafe-config" (OuterVolumeSpecName: "config") pod "79ba2d28-9208-4051-a300-d8b48a88aafe" (UID: "79ba2d28-9208-4051-a300-d8b48a88aafe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:05 crc kubenswrapper[4932]: I1125 09:07:05.962714 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79ba2d28-9208-4051-a300-d8b48a88aafe-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "79ba2d28-9208-4051-a300-d8b48a88aafe" (UID: "79ba2d28-9208-4051-a300-d8b48a88aafe"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:05 crc kubenswrapper[4932]: I1125 09:07:05.971971 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79ba2d28-9208-4051-a300-d8b48a88aafe-kube-api-access-km22k" (OuterVolumeSpecName: "kube-api-access-km22k") pod "79ba2d28-9208-4051-a300-d8b48a88aafe" (UID: "79ba2d28-9208-4051-a300-d8b48a88aafe"). InnerVolumeSpecName "kube-api-access-km22k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:05 crc kubenswrapper[4932]: I1125 09:07:05.972881 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" Nov 25 09:07:05 crc kubenswrapper[4932]: I1125 09:07:05.972961 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-xvf9s" event={"ID":"79ba2d28-9208-4051-a300-d8b48a88aafe","Type":"ContainerDied","Data":"29cd03945a44073d2256a7d3d449db2e7a0a4dc26a47d01965ff07ff3a4be283"} Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.068865 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/79ba2d28-9208-4051-a300-d8b48a88aafe-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.069340 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-km22k\" (UniqueName: \"kubernetes.io/projected/79ba2d28-9208-4051-a300-d8b48a88aafe-kube-api-access-km22k\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.069490 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79ba2d28-9208-4051-a300-d8b48a88aafe-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.080282 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-xvf9s"] Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.081544 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-xvf9s"] Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.615161 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79ba2d28-9208-4051-a300-d8b48a88aafe" path="/var/lib/kubelet/pods/79ba2d28-9208-4051-a300-d8b48a88aafe/volumes" Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.980145 4932 generic.go:334] "Generic (PLEG): container finished" podID="8134265d-9da9-4607-8db8-98330608ba4c" containerID="659410f3e179673e519464b18c0db2561436e5f7e651c66d2f2dc7308a27ecbd" exitCode=0 Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.980251 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"8134265d-9da9-4607-8db8-98330608ba4c","Type":"ContainerDied","Data":"659410f3e179673e519464b18c0db2561436e5f7e651c66d2f2dc7308a27ecbd"} Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.982993 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-drcqj" event={"ID":"257c86ab-2577-4d46-bdb3-1ec56da0d21e","Type":"ContainerStarted","Data":"18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16"} Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.983232 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.985638 4932 generic.go:334] "Generic (PLEG): container finished" 
podID="2023df73-6a92-4838-8d5e-31f533796950" containerID="29ea220a319155118ee72222ef7879f0c6f85a5fbe3eeb194a6b2229582758e2" exitCode=0 Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.985720 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2023df73-6a92-4838-8d5e-31f533796950","Type":"ContainerDied","Data":"29ea220a319155118ee72222ef7879f0c6f85a5fbe3eeb194a6b2229582758e2"} Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.989248 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5373bec8-828a-4e9b-b0fd-6a0ef84375de","Type":"ContainerStarted","Data":"4ec3744d9a3e32c5d252c31f008b49edb1884c6bb290d5af4a837ca5bbb374f8"} Nov 25 09:07:06 crc kubenswrapper[4932]: I1125 09:07:06.991451 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce","Type":"ContainerStarted","Data":"3aa7ad743c9d91e2340b3e3408429966ea1670cfd6e520674b81c2217ef12e5e"} Nov 25 09:07:07 crc kubenswrapper[4932]: I1125 09:07:07.074000 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-drcqj" podStartSLOduration=15.867866144 podStartE2EDuration="22.073978116s" podCreationTimestamp="2025-11-25 09:06:45 +0000 UTC" firstStartedPulling="2025-11-25 09:06:56.036930412 +0000 UTC m=+1076.162959975" lastFinishedPulling="2025-11-25 09:07:02.243042384 +0000 UTC m=+1082.369071947" observedRunningTime="2025-11-25 09:07:07.069106007 +0000 UTC m=+1087.195135570" watchObservedRunningTime="2025-11-25 09:07:07.073978116 +0000 UTC m=+1087.200007689" Nov 25 09:07:07 crc kubenswrapper[4932]: I1125 09:07:07.089090 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=10.006636179 podStartE2EDuration="20.089065266s" podCreationTimestamp="2025-11-25 09:06:47 +0000 UTC" firstStartedPulling="2025-11-25 09:06:55.848481622 +0000 UTC m=+1075.974511185" lastFinishedPulling="2025-11-25 09:07:05.930910709 +0000 UTC m=+1086.056940272" observedRunningTime="2025-11-25 09:07:07.088700825 +0000 UTC m=+1087.214730388" watchObservedRunningTime="2025-11-25 09:07:07.089065266 +0000 UTC m=+1087.215094829" Nov 25 09:07:07 crc kubenswrapper[4932]: I1125 09:07:07.122747 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=15.085818306 podStartE2EDuration="25.122721573s" podCreationTimestamp="2025-11-25 09:06:42 +0000 UTC" firstStartedPulling="2025-11-25 09:06:55.907696069 +0000 UTC m=+1076.033725632" lastFinishedPulling="2025-11-25 09:07:05.944599336 +0000 UTC m=+1086.070628899" observedRunningTime="2025-11-25 09:07:07.115280006 +0000 UTC m=+1087.241309589" watchObservedRunningTime="2025-11-25 09:07:07.122721573 +0000 UTC m=+1087.248751156" Nov 25 09:07:07 crc kubenswrapper[4932]: I1125 09:07:07.180732 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:07:07 crc kubenswrapper[4932]: I1125 09:07:07.180885 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:07:08 crc kubenswrapper[4932]: I1125 09:07:08.001126 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2023df73-6a92-4838-8d5e-31f533796950","Type":"ContainerStarted","Data":"bf51179534c5cb56fcf8c3a15f1fa6f1e73e732d074224a9cc7f262e16a5f982"} Nov 25 09:07:08 crc kubenswrapper[4932]: I1125 09:07:08.003296 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"8134265d-9da9-4607-8db8-98330608ba4c","Type":"ContainerStarted","Data":"1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541"} Nov 25 09:07:08 crc kubenswrapper[4932]: I1125 09:07:08.003753 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:07:08 crc kubenswrapper[4932]: I1125 09:07:08.022968 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=23.996314894 podStartE2EDuration="31.022942711s" podCreationTimestamp="2025-11-25 09:06:37 +0000 UTC" firstStartedPulling="2025-11-25 09:06:54.824334054 +0000 UTC m=+1074.950363617" lastFinishedPulling="2025-11-25 09:07:01.850961871 +0000 UTC m=+1081.976991434" observedRunningTime="2025-11-25 09:07:08.020133425 +0000 UTC m=+1088.146163068" watchObservedRunningTime="2025-11-25 09:07:08.022942711 +0000 UTC m=+1088.148972264" Nov 25 09:07:08 crc kubenswrapper[4932]: I1125 09:07:08.044565 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=25.467324035 podStartE2EDuration="32.04454861s" podCreationTimestamp="2025-11-25 09:06:36 +0000 UTC" firstStartedPulling="2025-11-25 09:06:55.774272998 +0000 UTC m=+1075.900302561" lastFinishedPulling="2025-11-25 09:07:02.351497573 +0000 UTC m=+1082.477527136" observedRunningTime="2025-11-25 09:07:08.04290061 +0000 UTC m=+1088.168930213" watchObservedRunningTime="2025-11-25 09:07:08.04454861 +0000 UTC m=+1088.170578173" Nov 25 09:07:08 crc kubenswrapper[4932]: I1125 09:07:08.140486 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 25 09:07:08 crc kubenswrapper[4932]: I1125 09:07:08.196546 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 25 09:07:08 crc kubenswrapper[4932]: I1125 09:07:08.618551 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 25 09:07:08 crc kubenswrapper[4932]: I1125 09:07:08.619461 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 25 09:07:08 crc kubenswrapper[4932]: I1125 09:07:08.678333 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.014596 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.068553 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.077474 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.342765 
4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-c2pbw"] Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.343941 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.345881 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.368421 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-c2pbw"] Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.425444 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-dns-svc\") pod \"dnsmasq-dns-65c78595c5-c2pbw\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.425542 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-config\") pod \"dnsmasq-dns-65c78595c5-c2pbw\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.425675 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-ovsdbserver-nb\") pod \"dnsmasq-dns-65c78595c5-c2pbw\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.425717 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvw92\" (UniqueName: \"kubernetes.io/projected/f366305a-6deb-4f46-af59-e8721b4791d6-kube-api-access-wvw92\") pod \"dnsmasq-dns-65c78595c5-c2pbw\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.432021 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-fbk5k"] Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.433293 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.435586 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.443675 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-fbk5k"] Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.526962 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vq8rm\" (UniqueName: \"kubernetes.io/projected/c16a4087-2597-4662-880f-80a7a2a78ef2-kube-api-access-vq8rm\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.527014 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-ovsdbserver-nb\") pod \"dnsmasq-dns-65c78595c5-c2pbw\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.527059 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvw92\" (UniqueName: \"kubernetes.io/projected/f366305a-6deb-4f46-af59-e8721b4791d6-kube-api-access-wvw92\") pod \"dnsmasq-dns-65c78595c5-c2pbw\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.527163 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-dns-svc\") pod \"dnsmasq-dns-65c78595c5-c2pbw\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.527284 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16a4087-2597-4662-880f-80a7a2a78ef2-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.527413 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/c16a4087-2597-4662-880f-80a7a2a78ef2-ovn-rundir\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.527455 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-config\") pod \"dnsmasq-dns-65c78595c5-c2pbw\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.527509 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c16a4087-2597-4662-880f-80a7a2a78ef2-combined-ca-bundle\") pod \"ovn-controller-metrics-fbk5k\" (UID: 
\"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.527566 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/c16a4087-2597-4662-880f-80a7a2a78ef2-ovs-rundir\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.527811 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c16a4087-2597-4662-880f-80a7a2a78ef2-config\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.528419 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-ovsdbserver-nb\") pod \"dnsmasq-dns-65c78595c5-c2pbw\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.528494 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-dns-svc\") pod \"dnsmasq-dns-65c78595c5-c2pbw\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.528520 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-config\") pod \"dnsmasq-dns-65c78595c5-c2pbw\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.556420 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvw92\" (UniqueName: \"kubernetes.io/projected/f366305a-6deb-4f46-af59-e8721b4791d6-kube-api-access-wvw92\") pod \"dnsmasq-dns-65c78595c5-c2pbw\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.630737 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c16a4087-2597-4662-880f-80a7a2a78ef2-combined-ca-bundle\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.630795 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/c16a4087-2597-4662-880f-80a7a2a78ef2-ovs-rundir\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.630866 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c16a4087-2597-4662-880f-80a7a2a78ef2-config\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 
crc kubenswrapper[4932]: I1125 09:07:09.631173 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/c16a4087-2597-4662-880f-80a7a2a78ef2-ovs-rundir\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.630998 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vq8rm\" (UniqueName: \"kubernetes.io/projected/c16a4087-2597-4662-880f-80a7a2a78ef2-kube-api-access-vq8rm\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.631572 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16a4087-2597-4662-880f-80a7a2a78ef2-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.631631 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/c16a4087-2597-4662-880f-80a7a2a78ef2-ovn-rundir\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.631744 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/c16a4087-2597-4662-880f-80a7a2a78ef2-ovn-rundir\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.632606 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c16a4087-2597-4662-880f-80a7a2a78ef2-config\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.635668 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c16a4087-2597-4662-880f-80a7a2a78ef2-combined-ca-bundle\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.635931 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16a4087-2597-4662-880f-80a7a2a78ef2-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.661622 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.662488 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vq8rm\" (UniqueName: \"kubernetes.io/projected/c16a4087-2597-4662-880f-80a7a2a78ef2-kube-api-access-vq8rm\") pod \"ovn-controller-metrics-fbk5k\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.679251 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.738623 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.750479 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.751048 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-c2pbw"] Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.779877 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-cvjh9"] Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.781502 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.785542 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.790700 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-cvjh9"] Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.936762 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.936806 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.936872 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slrvq\" (UniqueName: \"kubernetes.io/projected/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-kube-api-access-slrvq\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.936898 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-config\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:09 crc kubenswrapper[4932]: I1125 09:07:09.936919 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.038812 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slrvq\" (UniqueName: \"kubernetes.io/projected/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-kube-api-access-slrvq\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.039344 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-config\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.039380 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.039459 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.039484 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.040868 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.040919 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.041475 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-config\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.041537 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.060237 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slrvq\" (UniqueName: \"kubernetes.io/projected/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-kube-api-access-slrvq\") pod \"dnsmasq-dns-5c7b6b5695-cvjh9\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.076934 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.109140 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.173572 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-c2pbw"] Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.225629 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.237826 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.237937 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.239960 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.254043 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-f7k82" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.254308 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.254362 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.289118 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-fbk5k"] Nov 25 09:07:10 crc kubenswrapper[4932]: W1125 09:07:10.305832 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc16a4087_2597_4662_880f_80a7a2a78ef2.slice/crio-29d3d7589e73d563616f3011531058bcebdfc971cdd0988bee5b0aca53c07b26 WatchSource:0}: Error finding container 29d3d7589e73d563616f3011531058bcebdfc971cdd0988bee5b0aca53c07b26: Status 404 returned error can't find the container with id 29d3d7589e73d563616f3011531058bcebdfc971cdd0988bee5b0aca53c07b26 Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.349238 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c9d818a0-17fd-44a2-8855-a6f847efe274-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.349719 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/c9d818a0-17fd-44a2-8855-a6f847efe274-scripts\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.349782 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9d818a0-17fd-44a2-8855-a6f847efe274-config\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.349921 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.350028 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bxn9\" (UniqueName: \"kubernetes.io/projected/c9d818a0-17fd-44a2-8855-a6f847efe274-kube-api-access-5bxn9\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.350063 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.350096 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.452268 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c9d818a0-17fd-44a2-8855-a6f847efe274-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.452407 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9d818a0-17fd-44a2-8855-a6f847efe274-scripts\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.452433 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9d818a0-17fd-44a2-8855-a6f847efe274-config\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.452457 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.452482 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bxn9\" (UniqueName: \"kubernetes.io/projected/c9d818a0-17fd-44a2-8855-a6f847efe274-kube-api-access-5bxn9\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.452500 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.452521 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.453943 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9d818a0-17fd-44a2-8855-a6f847efe274-config\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.454229 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c9d818a0-17fd-44a2-8855-a6f847efe274-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.454712 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9d818a0-17fd-44a2-8855-a6f847efe274-scripts\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.462116 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.462692 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.473815 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.489245 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bxn9\" (UniqueName: \"kubernetes.io/projected/c9d818a0-17fd-44a2-8855-a6f847efe274-kube-api-access-5bxn9\") pod \"ovn-northd-0\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.567760 4932 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.702756 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-cvjh9"] Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.793480 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.823402 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-cvjh9"] Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.842984 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-2dmrz"] Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.844393 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.855467 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-2dmrz"] Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.962398 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-dns-svc\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.962624 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-config\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.962675 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-ovsdbserver-sb\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.962704 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-ovsdbserver-nb\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.962755 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhcgf\" (UniqueName: \"kubernetes.io/projected/db4b005b-e018-4162-8056-be07edd72b71-kube-api-access-bhcgf\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:10 crc kubenswrapper[4932]: I1125 09:07:10.967214 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 09:07:10 crc kubenswrapper[4932]: W1125 09:07:10.974884 4932 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9d818a0_17fd_44a2_8855_a6f847efe274.slice/crio-e3a0478c158f464f91ac01edc36339ead731fb9020f463add5e4c96c1931c20e WatchSource:0}: Error finding container e3a0478c158f464f91ac01edc36339ead731fb9020f463add5e4c96c1931c20e: Status 404 returned error can't find the container with id e3a0478c158f464f91ac01edc36339ead731fb9020f463add5e4c96c1931c20e Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.034217 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c9d818a0-17fd-44a2-8855-a6f847efe274","Type":"ContainerStarted","Data":"e3a0478c158f464f91ac01edc36339ead731fb9020f463add5e4c96c1931c20e"} Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.035704 4932 generic.go:334] "Generic (PLEG): container finished" podID="f366305a-6deb-4f46-af59-e8721b4791d6" containerID="9aa148d51c0a49253e6f47e887bd6fbdafde5b47766c1502cd80c3d633fd513f" exitCode=0 Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.035930 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" event={"ID":"f366305a-6deb-4f46-af59-e8721b4791d6","Type":"ContainerDied","Data":"9aa148d51c0a49253e6f47e887bd6fbdafde5b47766c1502cd80c3d633fd513f"} Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.035955 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" event={"ID":"f366305a-6deb-4f46-af59-e8721b4791d6","Type":"ContainerStarted","Data":"ac39961d04cf8a3d81d2e7e7faae9790a2118e9a61825cc74b51978ba7b57ff0"} Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.041731 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-fbk5k" event={"ID":"c16a4087-2597-4662-880f-80a7a2a78ef2","Type":"ContainerStarted","Data":"29d3d7589e73d563616f3011531058bcebdfc971cdd0988bee5b0aca53c07b26"} Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.043483 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" event={"ID":"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2","Type":"ContainerStarted","Data":"599b3bee184cd316d06cd009f738546170b5dc5fc7f39a0e6e9a678da58cd78f"} Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.064413 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhcgf\" (UniqueName: \"kubernetes.io/projected/db4b005b-e018-4162-8056-be07edd72b71-kube-api-access-bhcgf\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.064489 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-dns-svc\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.064512 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-config\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.064584 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-ovsdbserver-sb\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.064615 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-ovsdbserver-nb\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.065753 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-ovsdbserver-nb\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.066751 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-dns-svc\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.067254 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-config\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.068025 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-ovsdbserver-sb\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.085146 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhcgf\" (UniqueName: \"kubernetes.io/projected/db4b005b-e018-4162-8056-be07edd72b71-kube-api-access-bhcgf\") pod \"dnsmasq-dns-cf8bcbfcf-2dmrz\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.208889 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.270325 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.369967 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvw92\" (UniqueName: \"kubernetes.io/projected/f366305a-6deb-4f46-af59-e8721b4791d6-kube-api-access-wvw92\") pod \"f366305a-6deb-4f46-af59-e8721b4791d6\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.370279 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-config\") pod \"f366305a-6deb-4f46-af59-e8721b4791d6\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.370321 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-ovsdbserver-nb\") pod \"f366305a-6deb-4f46-af59-e8721b4791d6\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.370392 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-dns-svc\") pod \"f366305a-6deb-4f46-af59-e8721b4791d6\" (UID: \"f366305a-6deb-4f46-af59-e8721b4791d6\") " Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.375468 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f366305a-6deb-4f46-af59-e8721b4791d6-kube-api-access-wvw92" (OuterVolumeSpecName: "kube-api-access-wvw92") pod "f366305a-6deb-4f46-af59-e8721b4791d6" (UID: "f366305a-6deb-4f46-af59-e8721b4791d6"). InnerVolumeSpecName "kube-api-access-wvw92". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.397902 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f366305a-6deb-4f46-af59-e8721b4791d6" (UID: "f366305a-6deb-4f46-af59-e8721b4791d6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.404025 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f366305a-6deb-4f46-af59-e8721b4791d6" (UID: "f366305a-6deb-4f46-af59-e8721b4791d6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.404799 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-config" (OuterVolumeSpecName: "config") pod "f366305a-6deb-4f46-af59-e8721b4791d6" (UID: "f366305a-6deb-4f46-af59-e8721b4791d6"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.472129 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvw92\" (UniqueName: \"kubernetes.io/projected/f366305a-6deb-4f46-af59-e8721b4791d6-kube-api-access-wvw92\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.472172 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.472182 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.472206 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f366305a-6deb-4f46-af59-e8721b4791d6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.635422 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-2dmrz"] Nov 25 09:07:11 crc kubenswrapper[4932]: W1125 09:07:11.640059 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb4b005b_e018_4162_8056_be07edd72b71.slice/crio-0898c8cb2aa48c00a35820ceb0c0645e06fba6cea078c9b37afde0370ffd71b9 WatchSource:0}: Error finding container 0898c8cb2aa48c00a35820ceb0c0645e06fba6cea078c9b37afde0370ffd71b9: Status 404 returned error can't find the container with id 0898c8cb2aa48c00a35820ceb0c0645e06fba6cea078c9b37afde0370ffd71b9 Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.911764 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 25 09:07:11 crc kubenswrapper[4932]: E1125 09:07:11.912153 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f366305a-6deb-4f46-af59-e8721b4791d6" containerName="init" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.912171 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f366305a-6deb-4f46-af59-e8721b4791d6" containerName="init" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.912426 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f366305a-6deb-4f46-af59-e8721b4791d6" containerName="init" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.920833 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.927389 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.930431 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.930762 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.930915 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.931052 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-99dfk" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.980778 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/81ccee4a-f414-4007-ae17-b440b55dea5f-lock\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.981051 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.981138 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/81ccee4a-f414-4007-ae17-b440b55dea5f-cache\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.981329 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:11 crc kubenswrapper[4932]: I1125 09:07:11.981484 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skkg2\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-kube-api-access-skkg2\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.052991 4932 generic.go:334] "Generic (PLEG): container finished" podID="db4b005b-e018-4162-8056-be07edd72b71" containerID="cbef0b7a9f85a74a3a714541cd7021cd8b427005067e42bb833f83655ebddb19" exitCode=0 Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.053229 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" event={"ID":"db4b005b-e018-4162-8056-be07edd72b71","Type":"ContainerDied","Data":"cbef0b7a9f85a74a3a714541cd7021cd8b427005067e42bb833f83655ebddb19"} Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.053253 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" 
event={"ID":"db4b005b-e018-4162-8056-be07edd72b71","Type":"ContainerStarted","Data":"0898c8cb2aa48c00a35820ceb0c0645e06fba6cea078c9b37afde0370ffd71b9"} Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.055684 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" event={"ID":"f366305a-6deb-4f46-af59-e8721b4791d6","Type":"ContainerDied","Data":"ac39961d04cf8a3d81d2e7e7faae9790a2118e9a61825cc74b51978ba7b57ff0"} Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.055749 4932 scope.go:117] "RemoveContainer" containerID="9aa148d51c0a49253e6f47e887bd6fbdafde5b47766c1502cd80c3d633fd513f" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.055889 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65c78595c5-c2pbw" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.065323 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-fbk5k" event={"ID":"c16a4087-2597-4662-880f-80a7a2a78ef2","Type":"ContainerStarted","Data":"2e6c2d101453359a62990d88b2bfc484c902180a068c6131e7ba3d8b29699a33"} Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.070673 4932 generic.go:334] "Generic (PLEG): container finished" podID="df2ecb2f-a645-43af-83fc-bfd6c7ba43b2" containerID="cc4bd265fa43bddcf199bfaf5b882583fcaeed73f58dbc92b886212409791761" exitCode=0 Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.070875 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" event={"ID":"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2","Type":"ContainerDied","Data":"cc4bd265fa43bddcf199bfaf5b882583fcaeed73f58dbc92b886212409791761"} Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.082784 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/81ccee4a-f414-4007-ae17-b440b55dea5f-lock\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.083065 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.083254 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/81ccee4a-f414-4007-ae17-b440b55dea5f-cache\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.083454 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.083644 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skkg2\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-kube-api-access-skkg2\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:12 crc kubenswrapper[4932]: E1125 09:07:12.085037 4932 
projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:07:12 crc kubenswrapper[4932]: E1125 09:07:12.085076 4932 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:07:12 crc kubenswrapper[4932]: E1125 09:07:12.085151 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift podName:81ccee4a-f414-4007-ae17-b440b55dea5f nodeName:}" failed. No retries permitted until 2025-11-25 09:07:12.585115016 +0000 UTC m=+1092.711144659 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift") pod "swift-storage-0" (UID: "81ccee4a-f414-4007-ae17-b440b55dea5f") : configmap "swift-ring-files" not found Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.085611 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/swift-storage-0" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.086815 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/81ccee4a-f414-4007-ae17-b440b55dea5f-cache\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.086991 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/81ccee4a-f414-4007-ae17-b440b55dea5f-lock\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.098573 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-fbk5k" podStartSLOduration=3.098555126 podStartE2EDuration="3.098555126s" podCreationTimestamp="2025-11-25 09:07:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:07:12.095554135 +0000 UTC m=+1092.221583698" watchObservedRunningTime="2025-11-25 09:07:12.098555126 +0000 UTC m=+1092.224584689" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.108229 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skkg2\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-kube-api-access-skkg2\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.119083 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.210258 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-c2pbw"] Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.217904 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/dnsmasq-dns-65c78595c5-c2pbw"] Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.413666 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.597471 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-ovsdbserver-nb\") pod \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.597783 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-dns-svc\") pod \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.597831 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slrvq\" (UniqueName: \"kubernetes.io/projected/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-kube-api-access-slrvq\") pod \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.597890 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-ovsdbserver-sb\") pod \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.597920 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-config\") pod \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\" (UID: \"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2\") " Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.598446 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:12 crc kubenswrapper[4932]: E1125 09:07:12.598665 4932 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:07:12 crc kubenswrapper[4932]: E1125 09:07:12.598684 4932 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:07:12 crc kubenswrapper[4932]: E1125 09:07:12.598734 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift podName:81ccee4a-f414-4007-ae17-b440b55dea5f nodeName:}" failed. No retries permitted until 2025-11-25 09:07:13.598716238 +0000 UTC m=+1093.724745801 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift") pod "swift-storage-0" (UID: "81ccee4a-f414-4007-ae17-b440b55dea5f") : configmap "swift-ring-files" not found Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.617088 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-kube-api-access-slrvq" (OuterVolumeSpecName: "kube-api-access-slrvq") pod "df2ecb2f-a645-43af-83fc-bfd6c7ba43b2" (UID: "df2ecb2f-a645-43af-83fc-bfd6c7ba43b2"). InnerVolumeSpecName "kube-api-access-slrvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.622039 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f366305a-6deb-4f46-af59-e8721b4791d6" path="/var/lib/kubelet/pods/f366305a-6deb-4f46-af59-e8721b4791d6/volumes" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.632842 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-config" (OuterVolumeSpecName: "config") pod "df2ecb2f-a645-43af-83fc-bfd6c7ba43b2" (UID: "df2ecb2f-a645-43af-83fc-bfd6c7ba43b2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.632939 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "df2ecb2f-a645-43af-83fc-bfd6c7ba43b2" (UID: "df2ecb2f-a645-43af-83fc-bfd6c7ba43b2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.655486 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "df2ecb2f-a645-43af-83fc-bfd6c7ba43b2" (UID: "df2ecb2f-a645-43af-83fc-bfd6c7ba43b2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.655604 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "df2ecb2f-a645-43af-83fc-bfd6c7ba43b2" (UID: "df2ecb2f-a645-43af-83fc-bfd6c7ba43b2"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.700630 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.700661 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.700671 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.700680 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:12 crc kubenswrapper[4932]: I1125 09:07:12.700689 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slrvq\" (UniqueName: \"kubernetes.io/projected/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2-kube-api-access-slrvq\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:13 crc kubenswrapper[4932]: I1125 09:07:13.080701 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" Nov 25 09:07:13 crc kubenswrapper[4932]: I1125 09:07:13.080694 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-cvjh9" event={"ID":"df2ecb2f-a645-43af-83fc-bfd6c7ba43b2","Type":"ContainerDied","Data":"599b3bee184cd316d06cd009f738546170b5dc5fc7f39a0e6e9a678da58cd78f"} Nov 25 09:07:13 crc kubenswrapper[4932]: I1125 09:07:13.080776 4932 scope.go:117] "RemoveContainer" containerID="cc4bd265fa43bddcf199bfaf5b882583fcaeed73f58dbc92b886212409791761" Nov 25 09:07:13 crc kubenswrapper[4932]: I1125 09:07:13.084288 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" event={"ID":"db4b005b-e018-4162-8056-be07edd72b71","Type":"ContainerStarted","Data":"aaeed985b6e5d9b80c261c6498b86f540356061013423e09033041bfc2e3cad0"} Nov 25 09:07:13 crc kubenswrapper[4932]: I1125 09:07:13.084382 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:13 crc kubenswrapper[4932]: I1125 09:07:13.113279 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" podStartSLOduration=3.113252177 podStartE2EDuration="3.113252177s" podCreationTimestamp="2025-11-25 09:07:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:07:13.104625054 +0000 UTC m=+1093.230654617" watchObservedRunningTime="2025-11-25 09:07:13.113252177 +0000 UTC m=+1093.239281760" Nov 25 09:07:13 crc kubenswrapper[4932]: I1125 09:07:13.154179 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-cvjh9"] Nov 25 09:07:13 crc kubenswrapper[4932]: I1125 09:07:13.160274 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-cvjh9"] Nov 25 09:07:13 crc kubenswrapper[4932]: I1125 09:07:13.616084 4932 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:13 crc kubenswrapper[4932]: E1125 09:07:13.616332 4932 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:07:13 crc kubenswrapper[4932]: E1125 09:07:13.616521 4932 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:07:13 crc kubenswrapper[4932]: E1125 09:07:13.616577 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift podName:81ccee4a-f414-4007-ae17-b440b55dea5f nodeName:}" failed. No retries permitted until 2025-11-25 09:07:15.616561043 +0000 UTC m=+1095.742590606 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift") pod "swift-storage-0" (UID: "81ccee4a-f414-4007-ae17-b440b55dea5f") : configmap "swift-ring-files" not found Nov 25 09:07:14 crc kubenswrapper[4932]: I1125 09:07:14.621317 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df2ecb2f-a645-43af-83fc-bfd6c7ba43b2" path="/var/lib/kubelet/pods/df2ecb2f-a645-43af-83fc-bfd6c7ba43b2/volumes" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.652543 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:15 crc kubenswrapper[4932]: E1125 09:07:15.652814 4932 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:07:15 crc kubenswrapper[4932]: E1125 09:07:15.653267 4932 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:07:15 crc kubenswrapper[4932]: E1125 09:07:15.653370 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift podName:81ccee4a-f414-4007-ae17-b440b55dea5f nodeName:}" failed. No retries permitted until 2025-11-25 09:07:19.653342151 +0000 UTC m=+1099.779371714 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift") pod "swift-storage-0" (UID: "81ccee4a-f414-4007-ae17-b440b55dea5f") : configmap "swift-ring-files" not found Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.863147 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-crk46"] Nov 25 09:07:15 crc kubenswrapper[4932]: E1125 09:07:15.863753 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df2ecb2f-a645-43af-83fc-bfd6c7ba43b2" containerName="init" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.863776 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="df2ecb2f-a645-43af-83fc-bfd6c7ba43b2" containerName="init" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.863990 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="df2ecb2f-a645-43af-83fc-bfd6c7ba43b2" containerName="init" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.864591 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.868138 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.868436 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.868583 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.880460 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-crk46"] Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.959440 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-combined-ca-bundle\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.959673 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-swiftconf\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.959717 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78pjm\" (UniqueName: \"kubernetes.io/projected/27edebe8-2def-4a76-8f3d-0039ae29f4c8-kube-api-access-78pjm\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.959758 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-dispersionconf\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.959822 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27edebe8-2def-4a76-8f3d-0039ae29f4c8-scripts\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.959929 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/27edebe8-2def-4a76-8f3d-0039ae29f4c8-ring-data-devices\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:15 crc kubenswrapper[4932]: I1125 09:07:15.960004 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/27edebe8-2def-4a76-8f3d-0039ae29f4c8-etc-swift\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.062088 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-swiftconf\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.062161 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78pjm\" (UniqueName: \"kubernetes.io/projected/27edebe8-2def-4a76-8f3d-0039ae29f4c8-kube-api-access-78pjm\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.062216 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-dispersionconf\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.062245 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27edebe8-2def-4a76-8f3d-0039ae29f4c8-scripts\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.062312 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/27edebe8-2def-4a76-8f3d-0039ae29f4c8-ring-data-devices\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.062335 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/27edebe8-2def-4a76-8f3d-0039ae29f4c8-etc-swift\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.062358 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-combined-ca-bundle\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.062939 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/27edebe8-2def-4a76-8f3d-0039ae29f4c8-etc-swift\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.063013 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27edebe8-2def-4a76-8f3d-0039ae29f4c8-scripts\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.063399 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/27edebe8-2def-4a76-8f3d-0039ae29f4c8-ring-data-devices\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.079586 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-swiftconf\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.101916 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-dispersionconf\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.115828 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-combined-ca-bundle\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.121927 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78pjm\" (UniqueName: \"kubernetes.io/projected/27edebe8-2def-4a76-8f3d-0039ae29f4c8-kube-api-access-78pjm\") pod \"swift-ring-rebalance-crk46\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:16 crc kubenswrapper[4932]: I1125 09:07:16.196717 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:17 crc kubenswrapper[4932]: I1125 09:07:17.438050 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 25 09:07:17 crc kubenswrapper[4932]: I1125 09:07:17.438550 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 25 09:07:19 crc kubenswrapper[4932]: I1125 09:07:19.728445 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:19 crc kubenswrapper[4932]: E1125 09:07:19.728636 4932 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:07:19 crc kubenswrapper[4932]: E1125 09:07:19.728867 4932 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:07:19 crc kubenswrapper[4932]: E1125 09:07:19.728952 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift podName:81ccee4a-f414-4007-ae17-b440b55dea5f nodeName:}" failed. No retries permitted until 2025-11-25 09:07:27.728933406 +0000 UTC m=+1107.854962989 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift") pod "swift-storage-0" (UID: "81ccee4a-f414-4007-ae17-b440b55dea5f") : configmap "swift-ring-files" not found Nov 25 09:07:21 crc kubenswrapper[4932]: I1125 09:07:21.210399 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:07:21 crc kubenswrapper[4932]: I1125 09:07:21.270460 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-k44zw"] Nov 25 09:07:21 crc kubenswrapper[4932]: I1125 09:07:21.270740 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" podUID="8f744988-197a-4134-8488-549633bf4dc8" containerName="dnsmasq-dns" containerID="cri-o://2cd45f3f3d10c7197bb58b86d44b7ff2436d7c858de6d530c4fbebcb731920c6" gracePeriod=10 Nov 25 09:07:23 crc kubenswrapper[4932]: I1125 09:07:23.298297 4932 generic.go:334] "Generic (PLEG): container finished" podID="8f744988-197a-4134-8488-549633bf4dc8" containerID="2cd45f3f3d10c7197bb58b86d44b7ff2436d7c858de6d530c4fbebcb731920c6" exitCode=0 Nov 25 09:07:23 crc kubenswrapper[4932]: I1125 09:07:23.298463 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" event={"ID":"8f744988-197a-4134-8488-549633bf4dc8","Type":"ContainerDied","Data":"2cd45f3f3d10c7197bb58b86d44b7ff2436d7c858de6d530c4fbebcb731920c6"} Nov 25 09:07:24 crc kubenswrapper[4932]: I1125 09:07:24.464885 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 25 09:07:24 crc kubenswrapper[4932]: I1125 09:07:24.555418 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="8134265d-9da9-4607-8db8-98330608ba4c" containerName="galera" probeResult="failure" output=< Nov 25 09:07:24 crc kubenswrapper[4932]: wsrep_local_state_comment (Joined) 
differs from Synced Nov 25 09:07:24 crc kubenswrapper[4932]: > Nov 25 09:07:25 crc kubenswrapper[4932]: I1125 09:07:25.285601 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" podUID="8f744988-197a-4134-8488-549633bf4dc8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.103:5353: connect: connection refused" Nov 25 09:07:25 crc kubenswrapper[4932]: E1125 09:07:25.934501 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-northd@sha256:3e4ecc02b4b5e0860482a93599ba9ca598c5ce26c093c46e701f96fe51acb208" Nov 25 09:07:25 crc kubenswrapper[4932]: E1125 09:07:25.934750 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-northd,Image:quay.io/podified-antelope-centos9/openstack-ovn-northd@sha256:3e4ecc02b4b5e0860482a93599ba9ca598c5ce26c093c46e701f96fe51acb208,Command:[/usr/bin/ovn-northd],Args:[-vfile:off -vconsole:info --n-threads=1 --ovnnb-db=ssl:ovsdbserver-nb-0.openstack.svc.cluster.local:6641 --ovnsb-db=ssl:ovsdbserver-sb-0.openstack.svc.cluster.local:6642 --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key --ca-cert=/etc/pki/tls/certs/ovndbca.crt],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n9fh688h576h54dh5cbhbbh67ch5f6hf7h578h84h54h96hf6hcfh558h7bh548h6fh665hc8h596h678h95h89h5dbh65fhbdh5bfh75h689h8fq,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:certs,Value:n85h54chf9h5fdh656h5d8h64bhddh64h58ch57fh5d5h644h54ch56ch64ch98h545h548h657h5b5h5cchbfh66bhffh679h55h4h66h568hfbh57q,ValueFrom:nil,},EnvVar{Name:certs_metrics,Value:n65h655h579h679hb4h68dh9bh54bh9dh58chf4h5fch64h576hc6h5cdh694h6fh5b4hdbh7dh596h6h686hch5ch5b9hd9h5b8h575h557hddq,ValueFrom:nil,},EnvVar{Name:ovnnorthd-config,Value:n5c8h7ch56bh8dh8hc4h5dch9dh68h6bhb7h598h549h5dbh66fh6bh5b4h5cch5d6h55ch57fhfch588h89h5ddh5d6h65bh65bh8dhc4h67dh569q,ValueFrom:nil,},EnvVar{Name:ovnnorthd-scripts,Value:n664hd8h66ch58dh64hc9h66bhd4h558h697h67bh557hdch664h567h669h555h696h556h556h5fh5bh569hbh665h9dh4h9bh564hc8h5b7h5c4q,ValueFrom:nil,},EnvVar{Name:tls-ca-bundle.pem,Value:n557h54bh5cch7h595hf4h599h6h5f9h5d9h7fh5d8h648hdfh655h6fh5bbhcdh586h9fh55bh688hdbh66fhbfh9dh664h5fch5fdh546h5bch646q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-northd-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-northd-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-northd-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5b
xn9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/status_check.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/status_check.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-northd-0_openstack(c9d818a0-17fd-44a2-8855-a6f847efe274): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.003325 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.094180 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 25 09:07:26 crc kubenswrapper[4932]: E1125 09:07:26.188102 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-northd\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-northd-0" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.264436 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.319165 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" event={"ID":"8f744988-197a-4134-8488-549633bf4dc8","Type":"ContainerDied","Data":"943d01e8056ceb7457860de82384f76c9d78b5387400bf462f5112111882b20f"} Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.319225 4932 scope.go:117] "RemoveContainer" containerID="2cd45f3f3d10c7197bb58b86d44b7ff2436d7c858de6d530c4fbebcb731920c6" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.319301 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-k44zw" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.321394 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c9d818a0-17fd-44a2-8855-a6f847efe274","Type":"ContainerStarted","Data":"385bb0f63503360fe1dd3b8bc517012f4e561bae2dc4d40f0fb11f4b6501c4c1"} Nov 25 09:07:26 crc kubenswrapper[4932]: E1125 09:07:26.322830 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-northd\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-northd@sha256:3e4ecc02b4b5e0860482a93599ba9ca598c5ce26c093c46e701f96fe51acb208\\\"\"" pod="openstack/ovn-northd-0" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.352747 4932 scope.go:117] "RemoveContainer" containerID="3e07e73eace2087474ef6a2d649630b938e7539e202ebb160423573f98959e58" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.365461 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfpnh\" (UniqueName: \"kubernetes.io/projected/8f744988-197a-4134-8488-549633bf4dc8-kube-api-access-gfpnh\") pod \"8f744988-197a-4134-8488-549633bf4dc8\" (UID: \"8f744988-197a-4134-8488-549633bf4dc8\") " Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.365594 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f744988-197a-4134-8488-549633bf4dc8-dns-svc\") pod \"8f744988-197a-4134-8488-549633bf4dc8\" (UID: \"8f744988-197a-4134-8488-549633bf4dc8\") " Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.365713 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f744988-197a-4134-8488-549633bf4dc8-config\") pod \"8f744988-197a-4134-8488-549633bf4dc8\" (UID: \"8f744988-197a-4134-8488-549633bf4dc8\") " Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.371004 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f744988-197a-4134-8488-549633bf4dc8-kube-api-access-gfpnh" (OuterVolumeSpecName: "kube-api-access-gfpnh") pod "8f744988-197a-4134-8488-549633bf4dc8" (UID: "8f744988-197a-4134-8488-549633bf4dc8"). InnerVolumeSpecName "kube-api-access-gfpnh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.407444 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f744988-197a-4134-8488-549633bf4dc8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8f744988-197a-4134-8488-549633bf4dc8" (UID: "8f744988-197a-4134-8488-549633bf4dc8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.419935 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f744988-197a-4134-8488-549633bf4dc8-config" (OuterVolumeSpecName: "config") pod "8f744988-197a-4134-8488-549633bf4dc8" (UID: "8f744988-197a-4134-8488-549633bf4dc8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.446505 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-crk46"] Nov 25 09:07:26 crc kubenswrapper[4932]: W1125 09:07:26.447722 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27edebe8_2def_4a76_8f3d_0039ae29f4c8.slice/crio-8bdedf2623282f163466bc3f3ca0ecd624b27304d6a61496f048d3d76cae879f WatchSource:0}: Error finding container 8bdedf2623282f163466bc3f3ca0ecd624b27304d6a61496f048d3d76cae879f: Status 404 returned error can't find the container with id 8bdedf2623282f163466bc3f3ca0ecd624b27304d6a61496f048d3d76cae879f Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.468258 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfpnh\" (UniqueName: \"kubernetes.io/projected/8f744988-197a-4134-8488-549633bf4dc8-kube-api-access-gfpnh\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.469176 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8f744988-197a-4134-8488-549633bf4dc8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.469199 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f744988-197a-4134-8488-549633bf4dc8-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.655218 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-k44zw"] Nov 25 09:07:26 crc kubenswrapper[4932]: I1125 09:07:26.661677 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-k44zw"] Nov 25 09:07:27 crc kubenswrapper[4932]: I1125 09:07:27.333135 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-crk46" event={"ID":"27edebe8-2def-4a76-8f3d-0039ae29f4c8","Type":"ContainerStarted","Data":"8bdedf2623282f163466bc3f3ca0ecd624b27304d6a61496f048d3d76cae879f"} Nov 25 09:07:27 crc kubenswrapper[4932]: E1125 09:07:27.334896 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-northd\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-northd@sha256:3e4ecc02b4b5e0860482a93599ba9ca598c5ce26c093c46e701f96fe51acb208\\\"\"" pod="openstack/ovn-northd-0" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" Nov 25 09:07:27 crc kubenswrapper[4932]: I1125 09:07:27.516651 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 25 09:07:27 crc kubenswrapper[4932]: I1125 09:07:27.789441 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:27 crc kubenswrapper[4932]: E1125 09:07:27.789640 4932 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 09:07:27 crc kubenswrapper[4932]: E1125 09:07:27.789654 4932 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 09:07:27 crc 
kubenswrapper[4932]: E1125 09:07:27.789696 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift podName:81ccee4a-f414-4007-ae17-b440b55dea5f nodeName:}" failed. No retries permitted until 2025-11-25 09:07:43.789680757 +0000 UTC m=+1123.915710320 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift") pod "swift-storage-0" (UID: "81ccee4a-f414-4007-ae17-b440b55dea5f") : configmap "swift-ring-files" not found Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.342350 4932 generic.go:334] "Generic (PLEG): container finished" podID="f41b25a4-f48e-4938-9c23-0d89751af6ae" containerID="cea9b269376b0126100e6463c531d8bcf8908546bb4a1fa00b8f14257389b126" exitCode=0 Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.342387 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f41b25a4-f48e-4938-9c23-0d89751af6ae","Type":"ContainerDied","Data":"cea9b269376b0126100e6463c531d8bcf8908546bb4a1fa00b8f14257389b126"} Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.616055 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f744988-197a-4134-8488-549633bf4dc8" path="/var/lib/kubelet/pods/8f744988-197a-4134-8488-549633bf4dc8/volumes" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.724653 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bac1-account-create-ntphl"] Nov 25 09:07:28 crc kubenswrapper[4932]: E1125 09:07:28.725025 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f744988-197a-4134-8488-549633bf4dc8" containerName="init" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.725047 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f744988-197a-4134-8488-549633bf4dc8" containerName="init" Nov 25 09:07:28 crc kubenswrapper[4932]: E1125 09:07:28.725078 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f744988-197a-4134-8488-549633bf4dc8" containerName="dnsmasq-dns" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.725087 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f744988-197a-4134-8488-549633bf4dc8" containerName="dnsmasq-dns" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.725500 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f744988-197a-4134-8488-549633bf4dc8" containerName="dnsmasq-dns" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.726320 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bac1-account-create-ntphl" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.733585 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.750741 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bac1-account-create-ntphl"] Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.759839 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-jlfk4"] Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.761260 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-jlfk4" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.806278 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-jlfk4"] Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.808702 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd85p\" (UniqueName: \"kubernetes.io/projected/9c8b9eab-5875-4ce0-a580-e82023c14801-kube-api-access-qd85p\") pod \"keystone-bac1-account-create-ntphl\" (UID: \"9c8b9eab-5875-4ce0-a580-e82023c14801\") " pod="openstack/keystone-bac1-account-create-ntphl" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.808801 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c8b9eab-5875-4ce0-a580-e82023c14801-operator-scripts\") pod \"keystone-bac1-account-create-ntphl\" (UID: \"9c8b9eab-5875-4ce0-a580-e82023c14801\") " pod="openstack/keystone-bac1-account-create-ntphl" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.808852 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a30fe075-e21e-4406-9067-0ca8f5b8d2f3-operator-scripts\") pod \"keystone-db-create-jlfk4\" (UID: \"a30fe075-e21e-4406-9067-0ca8f5b8d2f3\") " pod="openstack/keystone-db-create-jlfk4" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.808908 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsxw5\" (UniqueName: \"kubernetes.io/projected/a30fe075-e21e-4406-9067-0ca8f5b8d2f3-kube-api-access-bsxw5\") pod \"keystone-db-create-jlfk4\" (UID: \"a30fe075-e21e-4406-9067-0ca8f5b8d2f3\") " pod="openstack/keystone-db-create-jlfk4" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.910892 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd85p\" (UniqueName: \"kubernetes.io/projected/9c8b9eab-5875-4ce0-a580-e82023c14801-kube-api-access-qd85p\") pod \"keystone-bac1-account-create-ntphl\" (UID: \"9c8b9eab-5875-4ce0-a580-e82023c14801\") " pod="openstack/keystone-bac1-account-create-ntphl" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.910980 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c8b9eab-5875-4ce0-a580-e82023c14801-operator-scripts\") pod \"keystone-bac1-account-create-ntphl\" (UID: \"9c8b9eab-5875-4ce0-a580-e82023c14801\") " pod="openstack/keystone-bac1-account-create-ntphl" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.911033 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a30fe075-e21e-4406-9067-0ca8f5b8d2f3-operator-scripts\") pod \"keystone-db-create-jlfk4\" (UID: \"a30fe075-e21e-4406-9067-0ca8f5b8d2f3\") " pod="openstack/keystone-db-create-jlfk4" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.911102 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsxw5\" (UniqueName: \"kubernetes.io/projected/a30fe075-e21e-4406-9067-0ca8f5b8d2f3-kube-api-access-bsxw5\") pod \"keystone-db-create-jlfk4\" (UID: \"a30fe075-e21e-4406-9067-0ca8f5b8d2f3\") " pod="openstack/keystone-db-create-jlfk4" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 
09:07:28.911998 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c8b9eab-5875-4ce0-a580-e82023c14801-operator-scripts\") pod \"keystone-bac1-account-create-ntphl\" (UID: \"9c8b9eab-5875-4ce0-a580-e82023c14801\") " pod="openstack/keystone-bac1-account-create-ntphl" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.912134 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a30fe075-e21e-4406-9067-0ca8f5b8d2f3-operator-scripts\") pod \"keystone-db-create-jlfk4\" (UID: \"a30fe075-e21e-4406-9067-0ca8f5b8d2f3\") " pod="openstack/keystone-db-create-jlfk4" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.930050 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd85p\" (UniqueName: \"kubernetes.io/projected/9c8b9eab-5875-4ce0-a580-e82023c14801-kube-api-access-qd85p\") pod \"keystone-bac1-account-create-ntphl\" (UID: \"9c8b9eab-5875-4ce0-a580-e82023c14801\") " pod="openstack/keystone-bac1-account-create-ntphl" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.932465 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsxw5\" (UniqueName: \"kubernetes.io/projected/a30fe075-e21e-4406-9067-0ca8f5b8d2f3-kube-api-access-bsxw5\") pod \"keystone-db-create-jlfk4\" (UID: \"a30fe075-e21e-4406-9067-0ca8f5b8d2f3\") " pod="openstack/keystone-db-create-jlfk4" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.971230 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-xn7j8"] Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.972365 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-xn7j8" Nov 25 09:07:28 crc kubenswrapper[4932]: I1125 09:07:28.977367 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-xn7j8"] Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.015291 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfqrv\" (UniqueName: \"kubernetes.io/projected/dcf01003-0e2e-4a81-8be7-234708a1caf4-kube-api-access-lfqrv\") pod \"placement-db-create-xn7j8\" (UID: \"dcf01003-0e2e-4a81-8be7-234708a1caf4\") " pod="openstack/placement-db-create-xn7j8" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.015460 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dcf01003-0e2e-4a81-8be7-234708a1caf4-operator-scripts\") pod \"placement-db-create-xn7j8\" (UID: \"dcf01003-0e2e-4a81-8be7-234708a1caf4\") " pod="openstack/placement-db-create-xn7j8" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.091961 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-b0bd-account-create-nmsld"] Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.093491 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-b0bd-account-create-nmsld" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.095566 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.101128 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-b0bd-account-create-nmsld"] Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.109262 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bac1-account-create-ntphl" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.116610 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfqrv\" (UniqueName: \"kubernetes.io/projected/dcf01003-0e2e-4a81-8be7-234708a1caf4-kube-api-access-lfqrv\") pod \"placement-db-create-xn7j8\" (UID: \"dcf01003-0e2e-4a81-8be7-234708a1caf4\") " pod="openstack/placement-db-create-xn7j8" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.116930 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dcf01003-0e2e-4a81-8be7-234708a1caf4-operator-scripts\") pod \"placement-db-create-xn7j8\" (UID: \"dcf01003-0e2e-4a81-8be7-234708a1caf4\") " pod="openstack/placement-db-create-xn7j8" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.117323 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-jlfk4" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.118182 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dcf01003-0e2e-4a81-8be7-234708a1caf4-operator-scripts\") pod \"placement-db-create-xn7j8\" (UID: \"dcf01003-0e2e-4a81-8be7-234708a1caf4\") " pod="openstack/placement-db-create-xn7j8" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.140204 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfqrv\" (UniqueName: \"kubernetes.io/projected/dcf01003-0e2e-4a81-8be7-234708a1caf4-kube-api-access-lfqrv\") pod \"placement-db-create-xn7j8\" (UID: \"dcf01003-0e2e-4a81-8be7-234708a1caf4\") " pod="openstack/placement-db-create-xn7j8" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.213925 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-lgz2l"] Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.214943 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-lgz2l" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.217989 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dx4gm\" (UniqueName: \"kubernetes.io/projected/bc7c8ac5-7063-4937-b0c9-9fcad5484c99-kube-api-access-dx4gm\") pod \"placement-b0bd-account-create-nmsld\" (UID: \"bc7c8ac5-7063-4937-b0c9-9fcad5484c99\") " pod="openstack/placement-b0bd-account-create-nmsld" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.218141 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc7c8ac5-7063-4937-b0c9-9fcad5484c99-operator-scripts\") pod \"placement-b0bd-account-create-nmsld\" (UID: \"bc7c8ac5-7063-4937-b0c9-9fcad5484c99\") " pod="openstack/placement-b0bd-account-create-nmsld" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.226474 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-lgz2l"] Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.319123 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dx4gm\" (UniqueName: \"kubernetes.io/projected/bc7c8ac5-7063-4937-b0c9-9fcad5484c99-kube-api-access-dx4gm\") pod \"placement-b0bd-account-create-nmsld\" (UID: \"bc7c8ac5-7063-4937-b0c9-9fcad5484c99\") " pod="openstack/placement-b0bd-account-create-nmsld" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.319209 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4675049-b86a-4228-b9f1-c9112c3dd34e-operator-scripts\") pod \"glance-db-create-lgz2l\" (UID: \"a4675049-b86a-4228-b9f1-c9112c3dd34e\") " pod="openstack/glance-db-create-lgz2l" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.319334 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc7c8ac5-7063-4937-b0c9-9fcad5484c99-operator-scripts\") pod \"placement-b0bd-account-create-nmsld\" (UID: \"bc7c8ac5-7063-4937-b0c9-9fcad5484c99\") " pod="openstack/placement-b0bd-account-create-nmsld" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.319389 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5h8s5\" (UniqueName: \"kubernetes.io/projected/a4675049-b86a-4228-b9f1-c9112c3dd34e-kube-api-access-5h8s5\") pod \"glance-db-create-lgz2l\" (UID: \"a4675049-b86a-4228-b9f1-c9112c3dd34e\") " pod="openstack/glance-db-create-lgz2l" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.321682 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-5148-account-create-jzfrs"] Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.323021 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-5148-account-create-jzfrs" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.324030 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc7c8ac5-7063-4937-b0c9-9fcad5484c99-operator-scripts\") pod \"placement-b0bd-account-create-nmsld\" (UID: \"bc7c8ac5-7063-4937-b0c9-9fcad5484c99\") " pod="openstack/placement-b0bd-account-create-nmsld" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.325108 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.331467 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-5148-account-create-jzfrs"] Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.336654 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-xn7j8" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.346293 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dx4gm\" (UniqueName: \"kubernetes.io/projected/bc7c8ac5-7063-4937-b0c9-9fcad5484c99-kube-api-access-dx4gm\") pod \"placement-b0bd-account-create-nmsld\" (UID: \"bc7c8ac5-7063-4937-b0c9-9fcad5484c99\") " pod="openstack/placement-b0bd-account-create-nmsld" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.354032 4932 generic.go:334] "Generic (PLEG): container finished" podID="969d317e-0787-44a8-8e27-554b0e887444" containerID="a6512388461b5ae27e35163ae39ba5b6a1c60f287ae5af46bd5dc2ae88208f31" exitCode=0 Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.354089 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"969d317e-0787-44a8-8e27-554b0e887444","Type":"ContainerDied","Data":"a6512388461b5ae27e35163ae39ba5b6a1c60f287ae5af46bd5dc2ae88208f31"} Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.409384 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-b0bd-account-create-nmsld" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.423129 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4675049-b86a-4228-b9f1-c9112c3dd34e-operator-scripts\") pod \"glance-db-create-lgz2l\" (UID: \"a4675049-b86a-4228-b9f1-c9112c3dd34e\") " pod="openstack/glance-db-create-lgz2l" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.423265 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/283026d1-b94f-47c9-9a9e-3b85e009715a-operator-scripts\") pod \"glance-5148-account-create-jzfrs\" (UID: \"283026d1-b94f-47c9-9a9e-3b85e009715a\") " pod="openstack/glance-5148-account-create-jzfrs" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.423324 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4v7ll\" (UniqueName: \"kubernetes.io/projected/283026d1-b94f-47c9-9a9e-3b85e009715a-kube-api-access-4v7ll\") pod \"glance-5148-account-create-jzfrs\" (UID: \"283026d1-b94f-47c9-9a9e-3b85e009715a\") " pod="openstack/glance-5148-account-create-jzfrs" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.423441 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5h8s5\" (UniqueName: \"kubernetes.io/projected/a4675049-b86a-4228-b9f1-c9112c3dd34e-kube-api-access-5h8s5\") pod \"glance-db-create-lgz2l\" (UID: \"a4675049-b86a-4228-b9f1-c9112c3dd34e\") " pod="openstack/glance-db-create-lgz2l" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.424506 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4675049-b86a-4228-b9f1-c9112c3dd34e-operator-scripts\") pod \"glance-db-create-lgz2l\" (UID: \"a4675049-b86a-4228-b9f1-c9112c3dd34e\") " pod="openstack/glance-db-create-lgz2l" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.441772 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5h8s5\" (UniqueName: \"kubernetes.io/projected/a4675049-b86a-4228-b9f1-c9112c3dd34e-kube-api-access-5h8s5\") pod \"glance-db-create-lgz2l\" (UID: \"a4675049-b86a-4228-b9f1-c9112c3dd34e\") " pod="openstack/glance-db-create-lgz2l" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.525614 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/283026d1-b94f-47c9-9a9e-3b85e009715a-operator-scripts\") pod \"glance-5148-account-create-jzfrs\" (UID: \"283026d1-b94f-47c9-9a9e-3b85e009715a\") " pod="openstack/glance-5148-account-create-jzfrs" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.525672 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4v7ll\" (UniqueName: \"kubernetes.io/projected/283026d1-b94f-47c9-9a9e-3b85e009715a-kube-api-access-4v7ll\") pod \"glance-5148-account-create-jzfrs\" (UID: \"283026d1-b94f-47c9-9a9e-3b85e009715a\") " pod="openstack/glance-5148-account-create-jzfrs" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.526913 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/283026d1-b94f-47c9-9a9e-3b85e009715a-operator-scripts\") pod 
\"glance-5148-account-create-jzfrs\" (UID: \"283026d1-b94f-47c9-9a9e-3b85e009715a\") " pod="openstack/glance-5148-account-create-jzfrs" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.535781 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-lgz2l" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.541778 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4v7ll\" (UniqueName: \"kubernetes.io/projected/283026d1-b94f-47c9-9a9e-3b85e009715a-kube-api-access-4v7ll\") pod \"glance-5148-account-create-jzfrs\" (UID: \"283026d1-b94f-47c9-9a9e-3b85e009715a\") " pod="openstack/glance-5148-account-create-jzfrs" Nov 25 09:07:29 crc kubenswrapper[4932]: I1125 09:07:29.643970 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5148-account-create-jzfrs" Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.232410 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-b0bd-account-create-nmsld"] Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.266232 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-lgz2l"] Nov 25 09:07:30 crc kubenswrapper[4932]: W1125 09:07:30.281889 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4675049_b86a_4228_b9f1_c9112c3dd34e.slice/crio-f2765c35f52cbc1731e71348c79e45517eb6cc8856b4de1370e8b62510b65025 WatchSource:0}: Error finding container f2765c35f52cbc1731e71348c79e45517eb6cc8856b4de1370e8b62510b65025: Status 404 returned error can't find the container with id f2765c35f52cbc1731e71348c79e45517eb6cc8856b4de1370e8b62510b65025 Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.284216 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bac1-account-create-ntphl"] Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.365345 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bac1-account-create-ntphl" event={"ID":"9c8b9eab-5875-4ce0-a580-e82023c14801","Type":"ContainerStarted","Data":"ec0f14bba5fdd0183806db1d3d84769fb9ad3f8d0eab9a257aef67170da5efab"} Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.367869 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f41b25a4-f48e-4938-9c23-0d89751af6ae","Type":"ContainerStarted","Data":"f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2"} Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.369201 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.374870 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-lgz2l" event={"ID":"a4675049-b86a-4228-b9f1-c9112c3dd34e","Type":"ContainerStarted","Data":"f2765c35f52cbc1731e71348c79e45517eb6cc8856b4de1370e8b62510b65025"} Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.377184 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"969d317e-0787-44a8-8e27-554b0e887444","Type":"ContainerStarted","Data":"86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a"} Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.378221 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 09:07:30 crc 
kubenswrapper[4932]: I1125 09:07:30.381337 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-crk46" event={"ID":"27edebe8-2def-4a76-8f3d-0039ae29f4c8","Type":"ContainerStarted","Data":"e0b8bc681c7a05963d3036a4a876cb360601a52500fc470f6948f123c43bc3f7"} Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.383324 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b0bd-account-create-nmsld" event={"ID":"bc7c8ac5-7063-4937-b0c9-9fcad5484c99","Type":"ContainerStarted","Data":"a210f6aee388e3f0d58de14c822907928384bcee9dab9eba13f3f4a84087f2d6"} Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.425366 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=42.262862252 podStartE2EDuration="56.425347499s" podCreationTimestamp="2025-11-25 09:06:34 +0000 UTC" firstStartedPulling="2025-11-25 09:06:40.851042159 +0000 UTC m=+1060.977071722" lastFinishedPulling="2025-11-25 09:06:55.013527406 +0000 UTC m=+1075.139556969" observedRunningTime="2025-11-25 09:07:30.407858387 +0000 UTC m=+1110.533887960" watchObservedRunningTime="2025-11-25 09:07:30.425347499 +0000 UTC m=+1110.551377062" Nov 25 09:07:30 crc kubenswrapper[4932]: W1125 09:07:30.434990 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda30fe075_e21e_4406_9067_0ca8f5b8d2f3.slice/crio-e0a720876b5b111d07058c26722f765d02ee1218b77e792078f145d631fa6cc5 WatchSource:0}: Error finding container e0a720876b5b111d07058c26722f765d02ee1218b77e792078f145d631fa6cc5: Status 404 returned error can't find the container with id e0a720876b5b111d07058c26722f765d02ee1218b77e792078f145d631fa6cc5 Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.438951 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-xn7j8"] Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.447102 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-jlfk4"] Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.464724 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-5148-account-create-jzfrs"] Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.486055 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=42.424028323 podStartE2EDuration="56.486033424s" podCreationTimestamp="2025-11-25 09:06:34 +0000 UTC" firstStartedPulling="2025-11-25 09:06:40.864591583 +0000 UTC m=+1060.990621146" lastFinishedPulling="2025-11-25 09:06:54.926596684 +0000 UTC m=+1075.052626247" observedRunningTime="2025-11-25 09:07:30.435865517 +0000 UTC m=+1110.561895100" watchObservedRunningTime="2025-11-25 09:07:30.486033424 +0000 UTC m=+1110.612062987" Nov 25 09:07:30 crc kubenswrapper[4932]: I1125 09:07:30.502031 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-crk46" podStartSLOduration=12.184477463 podStartE2EDuration="15.502009601s" podCreationTimestamp="2025-11-25 09:07:15 +0000 UTC" firstStartedPulling="2025-11-25 09:07:26.449920606 +0000 UTC m=+1106.575950169" lastFinishedPulling="2025-11-25 09:07:29.767452744 +0000 UTC m=+1109.893482307" observedRunningTime="2025-11-25 09:07:30.449930038 +0000 UTC m=+1110.575959601" watchObservedRunningTime="2025-11-25 09:07:30.502009601 +0000 UTC m=+1110.628039164" Nov 25 09:07:31 crc kubenswrapper[4932]: 
I1125 09:07:31.393055 4932 generic.go:334] "Generic (PLEG): container finished" podID="dcf01003-0e2e-4a81-8be7-234708a1caf4" containerID="6cea57533e20d403e6c8f694564639eb3b31680060a5f8aa034062f0c2af90c0" exitCode=0 Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.393138 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-xn7j8" event={"ID":"dcf01003-0e2e-4a81-8be7-234708a1caf4","Type":"ContainerDied","Data":"6cea57533e20d403e6c8f694564639eb3b31680060a5f8aa034062f0c2af90c0"} Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.393444 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-xn7j8" event={"ID":"dcf01003-0e2e-4a81-8be7-234708a1caf4","Type":"ContainerStarted","Data":"d9a895ac1494bee31f0ce25d8951286b2b2e19358c9b53c1a719d87c1657d74d"} Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.395239 4932 generic.go:334] "Generic (PLEG): container finished" podID="9c8b9eab-5875-4ce0-a580-e82023c14801" containerID="d05bcefea7f8aa60e1f076a234efeb904bc9f1cf040c4db74790ff54cf400d60" exitCode=0 Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.395312 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bac1-account-create-ntphl" event={"ID":"9c8b9eab-5875-4ce0-a580-e82023c14801","Type":"ContainerDied","Data":"d05bcefea7f8aa60e1f076a234efeb904bc9f1cf040c4db74790ff54cf400d60"} Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.396999 4932 generic.go:334] "Generic (PLEG): container finished" podID="a4675049-b86a-4228-b9f1-c9112c3dd34e" containerID="a35354a8de08cea5ce51c600660523c41a46a48b0a5773eb2e39562647569bde" exitCode=0 Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.397072 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-lgz2l" event={"ID":"a4675049-b86a-4228-b9f1-c9112c3dd34e","Type":"ContainerDied","Data":"a35354a8de08cea5ce51c600660523c41a46a48b0a5773eb2e39562647569bde"} Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.398878 4932 generic.go:334] "Generic (PLEG): container finished" podID="a30fe075-e21e-4406-9067-0ca8f5b8d2f3" containerID="0d6aad4f35e29b72e657fd363bd7fc014cdc67271ceea4aafd0c3a3f5e8823a6" exitCode=0 Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.398928 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-jlfk4" event={"ID":"a30fe075-e21e-4406-9067-0ca8f5b8d2f3","Type":"ContainerDied","Data":"0d6aad4f35e29b72e657fd363bd7fc014cdc67271ceea4aafd0c3a3f5e8823a6"} Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.398955 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-jlfk4" event={"ID":"a30fe075-e21e-4406-9067-0ca8f5b8d2f3","Type":"ContainerStarted","Data":"e0a720876b5b111d07058c26722f765d02ee1218b77e792078f145d631fa6cc5"} Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.400320 4932 generic.go:334] "Generic (PLEG): container finished" podID="283026d1-b94f-47c9-9a9e-3b85e009715a" containerID="00a6d2bdb345402225b10a7c036b02b9a0fa2648af0319f26198095fb2c8e5e9" exitCode=0 Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.400377 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5148-account-create-jzfrs" event={"ID":"283026d1-b94f-47c9-9a9e-3b85e009715a","Type":"ContainerDied","Data":"00a6d2bdb345402225b10a7c036b02b9a0fa2648af0319f26198095fb2c8e5e9"} Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.400425 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-5148-account-create-jzfrs" event={"ID":"283026d1-b94f-47c9-9a9e-3b85e009715a","Type":"ContainerStarted","Data":"37334520e1b6625b4b3e17a3e8c60f9ce2582e29bcade05f95c07933bb2d3297"} Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.402095 4932 generic.go:334] "Generic (PLEG): container finished" podID="bc7c8ac5-7063-4937-b0c9-9fcad5484c99" containerID="4d0ab2b44271d534e37d3f273c78fe12c0481dffe52307dead0cfca9ed609e63" exitCode=0 Nov 25 09:07:31 crc kubenswrapper[4932]: I1125 09:07:31.402163 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b0bd-account-create-nmsld" event={"ID":"bc7c8ac5-7063-4937-b0c9-9fcad5484c99","Type":"ContainerDied","Data":"4d0ab2b44271d534e37d3f273c78fe12c0481dffe52307dead0cfca9ed609e63"} Nov 25 09:07:32 crc kubenswrapper[4932]: I1125 09:07:32.867022 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-5148-account-create-jzfrs" Nov 25 09:07:32 crc kubenswrapper[4932]: I1125 09:07:32.913832 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/283026d1-b94f-47c9-9a9e-3b85e009715a-operator-scripts\") pod \"283026d1-b94f-47c9-9a9e-3b85e009715a\" (UID: \"283026d1-b94f-47c9-9a9e-3b85e009715a\") " Nov 25 09:07:32 crc kubenswrapper[4932]: I1125 09:07:32.914805 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4v7ll\" (UniqueName: \"kubernetes.io/projected/283026d1-b94f-47c9-9a9e-3b85e009715a-kube-api-access-4v7ll\") pod \"283026d1-b94f-47c9-9a9e-3b85e009715a\" (UID: \"283026d1-b94f-47c9-9a9e-3b85e009715a\") " Nov 25 09:07:32 crc kubenswrapper[4932]: I1125 09:07:32.914619 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/283026d1-b94f-47c9-9a9e-3b85e009715a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "283026d1-b94f-47c9-9a9e-3b85e009715a" (UID: "283026d1-b94f-47c9-9a9e-3b85e009715a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:32 crc kubenswrapper[4932]: I1125 09:07:32.923105 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/283026d1-b94f-47c9-9a9e-3b85e009715a-kube-api-access-4v7ll" (OuterVolumeSpecName: "kube-api-access-4v7ll") pod "283026d1-b94f-47c9-9a9e-3b85e009715a" (UID: "283026d1-b94f-47c9-9a9e-3b85e009715a"). InnerVolumeSpecName "kube-api-access-4v7ll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.017320 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/283026d1-b94f-47c9-9a9e-3b85e009715a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.017351 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4v7ll\" (UniqueName: \"kubernetes.io/projected/283026d1-b94f-47c9-9a9e-3b85e009715a-kube-api-access-4v7ll\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.121058 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-jlfk4" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.126993 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-xn7j8" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.134922 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-lgz2l" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.151941 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bac1-account-create-ntphl" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.161890 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-b0bd-account-create-nmsld" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.219560 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dcf01003-0e2e-4a81-8be7-234708a1caf4-operator-scripts\") pod \"dcf01003-0e2e-4a81-8be7-234708a1caf4\" (UID: \"dcf01003-0e2e-4a81-8be7-234708a1caf4\") " Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.219640 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qd85p\" (UniqueName: \"kubernetes.io/projected/9c8b9eab-5875-4ce0-a580-e82023c14801-kube-api-access-qd85p\") pod \"9c8b9eab-5875-4ce0-a580-e82023c14801\" (UID: \"9c8b9eab-5875-4ce0-a580-e82023c14801\") " Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.219677 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dx4gm\" (UniqueName: \"kubernetes.io/projected/bc7c8ac5-7063-4937-b0c9-9fcad5484c99-kube-api-access-dx4gm\") pod \"bc7c8ac5-7063-4937-b0c9-9fcad5484c99\" (UID: \"bc7c8ac5-7063-4937-b0c9-9fcad5484c99\") " Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.219742 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc7c8ac5-7063-4937-b0c9-9fcad5484c99-operator-scripts\") pod \"bc7c8ac5-7063-4937-b0c9-9fcad5484c99\" (UID: \"bc7c8ac5-7063-4937-b0c9-9fcad5484c99\") " Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.219770 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4675049-b86a-4228-b9f1-c9112c3dd34e-operator-scripts\") pod \"a4675049-b86a-4228-b9f1-c9112c3dd34e\" (UID: \"a4675049-b86a-4228-b9f1-c9112c3dd34e\") " Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.219804 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfqrv\" (UniqueName: \"kubernetes.io/projected/dcf01003-0e2e-4a81-8be7-234708a1caf4-kube-api-access-lfqrv\") pod \"dcf01003-0e2e-4a81-8be7-234708a1caf4\" (UID: \"dcf01003-0e2e-4a81-8be7-234708a1caf4\") " Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.219839 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a30fe075-e21e-4406-9067-0ca8f5b8d2f3-operator-scripts\") pod \"a30fe075-e21e-4406-9067-0ca8f5b8d2f3\" (UID: \"a30fe075-e21e-4406-9067-0ca8f5b8d2f3\") " Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.219867 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsxw5\" (UniqueName: \"kubernetes.io/projected/a30fe075-e21e-4406-9067-0ca8f5b8d2f3-kube-api-access-bsxw5\") pod \"a30fe075-e21e-4406-9067-0ca8f5b8d2f3\" (UID: \"a30fe075-e21e-4406-9067-0ca8f5b8d2f3\") " Nov 25 
09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.219944 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5h8s5\" (UniqueName: \"kubernetes.io/projected/a4675049-b86a-4228-b9f1-c9112c3dd34e-kube-api-access-5h8s5\") pod \"a4675049-b86a-4228-b9f1-c9112c3dd34e\" (UID: \"a4675049-b86a-4228-b9f1-c9112c3dd34e\") " Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.219970 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c8b9eab-5875-4ce0-a580-e82023c14801-operator-scripts\") pod \"9c8b9eab-5875-4ce0-a580-e82023c14801\" (UID: \"9c8b9eab-5875-4ce0-a580-e82023c14801\") " Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.220622 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4675049-b86a-4228-b9f1-c9112c3dd34e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a4675049-b86a-4228-b9f1-c9112c3dd34e" (UID: "a4675049-b86a-4228-b9f1-c9112c3dd34e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.221053 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcf01003-0e2e-4a81-8be7-234708a1caf4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dcf01003-0e2e-4a81-8be7-234708a1caf4" (UID: "dcf01003-0e2e-4a81-8be7-234708a1caf4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.221142 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c8b9eab-5875-4ce0-a580-e82023c14801-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9c8b9eab-5875-4ce0-a580-e82023c14801" (UID: "9c8b9eab-5875-4ce0-a580-e82023c14801"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.221432 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a30fe075-e21e-4406-9067-0ca8f5b8d2f3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a30fe075-e21e-4406-9067-0ca8f5b8d2f3" (UID: "a30fe075-e21e-4406-9067-0ca8f5b8d2f3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.221681 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc7c8ac5-7063-4937-b0c9-9fcad5484c99-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bc7c8ac5-7063-4937-b0c9-9fcad5484c99" (UID: "bc7c8ac5-7063-4937-b0c9-9fcad5484c99"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.225444 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a30fe075-e21e-4406-9067-0ca8f5b8d2f3-kube-api-access-bsxw5" (OuterVolumeSpecName: "kube-api-access-bsxw5") pod "a30fe075-e21e-4406-9067-0ca8f5b8d2f3" (UID: "a30fe075-e21e-4406-9067-0ca8f5b8d2f3"). InnerVolumeSpecName "kube-api-access-bsxw5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.228132 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4675049-b86a-4228-b9f1-c9112c3dd34e-kube-api-access-5h8s5" (OuterVolumeSpecName: "kube-api-access-5h8s5") pod "a4675049-b86a-4228-b9f1-c9112c3dd34e" (UID: "a4675049-b86a-4228-b9f1-c9112c3dd34e"). InnerVolumeSpecName "kube-api-access-5h8s5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.228304 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc7c8ac5-7063-4937-b0c9-9fcad5484c99-kube-api-access-dx4gm" (OuterVolumeSpecName: "kube-api-access-dx4gm") pod "bc7c8ac5-7063-4937-b0c9-9fcad5484c99" (UID: "bc7c8ac5-7063-4937-b0c9-9fcad5484c99"). InnerVolumeSpecName "kube-api-access-dx4gm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.228382 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcf01003-0e2e-4a81-8be7-234708a1caf4-kube-api-access-lfqrv" (OuterVolumeSpecName: "kube-api-access-lfqrv") pod "dcf01003-0e2e-4a81-8be7-234708a1caf4" (UID: "dcf01003-0e2e-4a81-8be7-234708a1caf4"). InnerVolumeSpecName "kube-api-access-lfqrv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.234453 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c8b9eab-5875-4ce0-a580-e82023c14801-kube-api-access-qd85p" (OuterVolumeSpecName: "kube-api-access-qd85p") pod "9c8b9eab-5875-4ce0-a580-e82023c14801" (UID: "9c8b9eab-5875-4ce0-a580-e82023c14801"). InnerVolumeSpecName "kube-api-access-qd85p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.321864 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dcf01003-0e2e-4a81-8be7-234708a1caf4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.321913 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qd85p\" (UniqueName: \"kubernetes.io/projected/9c8b9eab-5875-4ce0-a580-e82023c14801-kube-api-access-qd85p\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.321924 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dx4gm\" (UniqueName: \"kubernetes.io/projected/bc7c8ac5-7063-4937-b0c9-9fcad5484c99-kube-api-access-dx4gm\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.321935 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc7c8ac5-7063-4937-b0c9-9fcad5484c99-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.321947 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4675049-b86a-4228-b9f1-c9112c3dd34e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.321961 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfqrv\" (UniqueName: \"kubernetes.io/projected/dcf01003-0e2e-4a81-8be7-234708a1caf4-kube-api-access-lfqrv\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.321972 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a30fe075-e21e-4406-9067-0ca8f5b8d2f3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.321983 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bsxw5\" (UniqueName: \"kubernetes.io/projected/a30fe075-e21e-4406-9067-0ca8f5b8d2f3-kube-api-access-bsxw5\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.321996 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5h8s5\" (UniqueName: \"kubernetes.io/projected/a4675049-b86a-4228-b9f1-c9112c3dd34e-kube-api-access-5h8s5\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.322007 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c8b9eab-5875-4ce0-a580-e82023c14801-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.417278 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-5148-account-create-jzfrs" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.417273 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-5148-account-create-jzfrs" event={"ID":"283026d1-b94f-47c9-9a9e-3b85e009715a","Type":"ContainerDied","Data":"37334520e1b6625b4b3e17a3e8c60f9ce2582e29bcade05f95c07933bb2d3297"} Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.417374 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="37334520e1b6625b4b3e17a3e8c60f9ce2582e29bcade05f95c07933bb2d3297" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.419902 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-b0bd-account-create-nmsld" event={"ID":"bc7c8ac5-7063-4937-b0c9-9fcad5484c99","Type":"ContainerDied","Data":"a210f6aee388e3f0d58de14c822907928384bcee9dab9eba13f3f4a84087f2d6"} Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.419932 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a210f6aee388e3f0d58de14c822907928384bcee9dab9eba13f3f4a84087f2d6" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.420120 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-b0bd-account-create-nmsld" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.421610 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-xn7j8" event={"ID":"dcf01003-0e2e-4a81-8be7-234708a1caf4","Type":"ContainerDied","Data":"d9a895ac1494bee31f0ce25d8951286b2b2e19358c9b53c1a719d87c1657d74d"} Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.421633 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9a895ac1494bee31f0ce25d8951286b2b2e19358c9b53c1a719d87c1657d74d" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.421659 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-xn7j8" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.423357 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bac1-account-create-ntphl" event={"ID":"9c8b9eab-5875-4ce0-a580-e82023c14801","Type":"ContainerDied","Data":"ec0f14bba5fdd0183806db1d3d84769fb9ad3f8d0eab9a257aef67170da5efab"} Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.423386 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec0f14bba5fdd0183806db1d3d84769fb9ad3f8d0eab9a257aef67170da5efab" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.423394 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bac1-account-create-ntphl" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.424887 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-lgz2l" event={"ID":"a4675049-b86a-4228-b9f1-c9112c3dd34e","Type":"ContainerDied","Data":"f2765c35f52cbc1731e71348c79e45517eb6cc8856b4de1370e8b62510b65025"} Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.424988 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2765c35f52cbc1731e71348c79e45517eb6cc8856b4de1370e8b62510b65025" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.425106 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-lgz2l" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.426430 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-jlfk4" event={"ID":"a30fe075-e21e-4406-9067-0ca8f5b8d2f3","Type":"ContainerDied","Data":"e0a720876b5b111d07058c26722f765d02ee1218b77e792078f145d631fa6cc5"} Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.426452 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0a720876b5b111d07058c26722f765d02ee1218b77e792078f145d631fa6cc5" Nov 25 09:07:33 crc kubenswrapper[4932]: I1125 09:07:33.426477 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-jlfk4" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.459269 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-sqgkm"] Nov 25 09:07:34 crc kubenswrapper[4932]: E1125 09:07:34.459847 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="283026d1-b94f-47c9-9a9e-3b85e009715a" containerName="mariadb-account-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.459859 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="283026d1-b94f-47c9-9a9e-3b85e009715a" containerName="mariadb-account-create" Nov 25 09:07:34 crc kubenswrapper[4932]: E1125 09:07:34.459876 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a30fe075-e21e-4406-9067-0ca8f5b8d2f3" containerName="mariadb-database-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.459881 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a30fe075-e21e-4406-9067-0ca8f5b8d2f3" containerName="mariadb-database-create" Nov 25 09:07:34 crc kubenswrapper[4932]: E1125 09:07:34.459895 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c8b9eab-5875-4ce0-a580-e82023c14801" containerName="mariadb-account-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.459902 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c8b9eab-5875-4ce0-a580-e82023c14801" containerName="mariadb-account-create" Nov 25 09:07:34 crc kubenswrapper[4932]: E1125 09:07:34.459914 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4675049-b86a-4228-b9f1-c9112c3dd34e" containerName="mariadb-database-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.459921 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4675049-b86a-4228-b9f1-c9112c3dd34e" containerName="mariadb-database-create" Nov 25 09:07:34 crc kubenswrapper[4932]: E1125 09:07:34.459932 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc7c8ac5-7063-4937-b0c9-9fcad5484c99" containerName="mariadb-account-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.459938 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc7c8ac5-7063-4937-b0c9-9fcad5484c99" containerName="mariadb-account-create" Nov 25 09:07:34 crc kubenswrapper[4932]: E1125 09:07:34.459949 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcf01003-0e2e-4a81-8be7-234708a1caf4" containerName="mariadb-database-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.459954 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcf01003-0e2e-4a81-8be7-234708a1caf4" containerName="mariadb-database-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.460101 4932 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="9c8b9eab-5875-4ce0-a580-e82023c14801" containerName="mariadb-account-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.460116 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a30fe075-e21e-4406-9067-0ca8f5b8d2f3" containerName="mariadb-database-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.460126 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcf01003-0e2e-4a81-8be7-234708a1caf4" containerName="mariadb-database-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.460135 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4675049-b86a-4228-b9f1-c9112c3dd34e" containerName="mariadb-database-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.460142 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc7c8ac5-7063-4937-b0c9-9fcad5484c99" containerName="mariadb-account-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.460150 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="283026d1-b94f-47c9-9a9e-3b85e009715a" containerName="mariadb-account-create" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.460671 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.462808 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.466472 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-btq26" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.473451 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-sqgkm"] Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.535932 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-config-data\") pod \"glance-db-sync-sqgkm\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") " pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.536008 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-combined-ca-bundle\") pod \"glance-db-sync-sqgkm\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") " pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.536244 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqqmz\" (UniqueName: \"kubernetes.io/projected/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-kube-api-access-jqqmz\") pod \"glance-db-sync-sqgkm\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") " pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.536361 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-db-sync-config-data\") pod \"glance-db-sync-sqgkm\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") " pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.638349 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-jqqmz\" (UniqueName: \"kubernetes.io/projected/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-kube-api-access-jqqmz\") pod \"glance-db-sync-sqgkm\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") " pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.638418 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-db-sync-config-data\") pod \"glance-db-sync-sqgkm\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") " pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.638489 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-config-data\") pod \"glance-db-sync-sqgkm\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") " pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.638518 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-combined-ca-bundle\") pod \"glance-db-sync-sqgkm\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") " pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.642807 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-config-data\") pod \"glance-db-sync-sqgkm\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") " pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.642858 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-combined-ca-bundle\") pod \"glance-db-sync-sqgkm\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") " pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.643522 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-db-sync-config-data\") pod \"glance-db-sync-sqgkm\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") " pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.656758 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqqmz\" (UniqueName: \"kubernetes.io/projected/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-kube-api-access-jqqmz\") pod \"glance-db-sync-sqgkm\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") " pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:34 crc kubenswrapper[4932]: I1125 09:07:34.775776 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-sqgkm" Nov 25 09:07:35 crc kubenswrapper[4932]: I1125 09:07:35.180986 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-sqgkm"] Nov 25 09:07:35 crc kubenswrapper[4932]: I1125 09:07:35.440704 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-sqgkm" event={"ID":"ee8b5a64-5144-4fd9-a7b0-b12d318ababa","Type":"ContainerStarted","Data":"139de3d285e81294dfbc2f10635ac0357bbc9f04a6997419a866d39b32defcc2"} Nov 25 09:07:35 crc kubenswrapper[4932]: I1125 09:07:35.543337 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-c26qd" podUID="b15edfd7-749d-45a4-9801-1eba98d77a5e" containerName="ovn-controller" probeResult="failure" output=< Nov 25 09:07:35 crc kubenswrapper[4932]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 09:07:35 crc kubenswrapper[4932]: > Nov 25 09:07:35 crc kubenswrapper[4932]: I1125 09:07:35.635735 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:07:37 crc kubenswrapper[4932]: I1125 09:07:37.181045 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:07:37 crc kubenswrapper[4932]: I1125 09:07:37.181628 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:07:37 crc kubenswrapper[4932]: I1125 09:07:37.473114 4932 generic.go:334] "Generic (PLEG): container finished" podID="27edebe8-2def-4a76-8f3d-0039ae29f4c8" containerID="e0b8bc681c7a05963d3036a4a876cb360601a52500fc470f6948f123c43bc3f7" exitCode=0 Nov 25 09:07:37 crc kubenswrapper[4932]: I1125 09:07:37.473159 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-crk46" event={"ID":"27edebe8-2def-4a76-8f3d-0039ae29f4c8","Type":"ContainerDied","Data":"e0b8bc681c7a05963d3036a4a876cb360601a52500fc470f6948f123c43bc3f7"} Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.865219 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.912452 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-swiftconf\") pod \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.912509 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-dispersionconf\") pod \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.912584 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/27edebe8-2def-4a76-8f3d-0039ae29f4c8-ring-data-devices\") pod \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.912611 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-combined-ca-bundle\") pod \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.912637 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/27edebe8-2def-4a76-8f3d-0039ae29f4c8-etc-swift\") pod \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.912739 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27edebe8-2def-4a76-8f3d-0039ae29f4c8-scripts\") pod \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.912765 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78pjm\" (UniqueName: \"kubernetes.io/projected/27edebe8-2def-4a76-8f3d-0039ae29f4c8-kube-api-access-78pjm\") pod \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\" (UID: \"27edebe8-2def-4a76-8f3d-0039ae29f4c8\") " Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.914114 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27edebe8-2def-4a76-8f3d-0039ae29f4c8-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "27edebe8-2def-4a76-8f3d-0039ae29f4c8" (UID: "27edebe8-2def-4a76-8f3d-0039ae29f4c8"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.914442 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27edebe8-2def-4a76-8f3d-0039ae29f4c8-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "27edebe8-2def-4a76-8f3d-0039ae29f4c8" (UID: "27edebe8-2def-4a76-8f3d-0039ae29f4c8"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.920792 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "27edebe8-2def-4a76-8f3d-0039ae29f4c8" (UID: "27edebe8-2def-4a76-8f3d-0039ae29f4c8"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.924613 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27edebe8-2def-4a76-8f3d-0039ae29f4c8-kube-api-access-78pjm" (OuterVolumeSpecName: "kube-api-access-78pjm") pod "27edebe8-2def-4a76-8f3d-0039ae29f4c8" (UID: "27edebe8-2def-4a76-8f3d-0039ae29f4c8"). InnerVolumeSpecName "kube-api-access-78pjm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.935845 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27edebe8-2def-4a76-8f3d-0039ae29f4c8-scripts" (OuterVolumeSpecName: "scripts") pod "27edebe8-2def-4a76-8f3d-0039ae29f4c8" (UID: "27edebe8-2def-4a76-8f3d-0039ae29f4c8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.952377 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "27edebe8-2def-4a76-8f3d-0039ae29f4c8" (UID: "27edebe8-2def-4a76-8f3d-0039ae29f4c8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:07:38 crc kubenswrapper[4932]: I1125 09:07:38.953736 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "27edebe8-2def-4a76-8f3d-0039ae29f4c8" (UID: "27edebe8-2def-4a76-8f3d-0039ae29f4c8"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:07:39 crc kubenswrapper[4932]: I1125 09:07:39.014824 4932 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/27edebe8-2def-4a76-8f3d-0039ae29f4c8-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:39 crc kubenswrapper[4932]: I1125 09:07:39.015248 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:39 crc kubenswrapper[4932]: I1125 09:07:39.015264 4932 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/27edebe8-2def-4a76-8f3d-0039ae29f4c8-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:39 crc kubenswrapper[4932]: I1125 09:07:39.015278 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/27edebe8-2def-4a76-8f3d-0039ae29f4c8-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:39 crc kubenswrapper[4932]: I1125 09:07:39.015290 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78pjm\" (UniqueName: \"kubernetes.io/projected/27edebe8-2def-4a76-8f3d-0039ae29f4c8-kube-api-access-78pjm\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:39 crc kubenswrapper[4932]: I1125 09:07:39.015304 4932 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:39 crc kubenswrapper[4932]: I1125 09:07:39.015315 4932 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/27edebe8-2def-4a76-8f3d-0039ae29f4c8-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:39 crc kubenswrapper[4932]: I1125 09:07:39.514768 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-crk46" event={"ID":"27edebe8-2def-4a76-8f3d-0039ae29f4c8","Type":"ContainerDied","Data":"8bdedf2623282f163466bc3f3ca0ecd624b27304d6a61496f048d3d76cae879f"} Nov 25 09:07:39 crc kubenswrapper[4932]: I1125 09:07:39.514818 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8bdedf2623282f163466bc3f3ca0ecd624b27304d6a61496f048d3d76cae879f" Nov 25 09:07:39 crc kubenswrapper[4932]: I1125 09:07:39.514883 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-crk46" Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.549791 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-c26qd" podUID="b15edfd7-749d-45a4-9801-1eba98d77a5e" containerName="ovn-controller" probeResult="failure" output=< Nov 25 09:07:40 crc kubenswrapper[4932]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 09:07:40 crc kubenswrapper[4932]: > Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.630016 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.849869 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-c26qd-config-dhv69"] Nov 25 09:07:40 crc kubenswrapper[4932]: E1125 09:07:40.850594 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27edebe8-2def-4a76-8f3d-0039ae29f4c8" containerName="swift-ring-rebalance" Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.850610 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="27edebe8-2def-4a76-8f3d-0039ae29f4c8" containerName="swift-ring-rebalance" Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.850776 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="27edebe8-2def-4a76-8f3d-0039ae29f4c8" containerName="swift-ring-rebalance" Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.851476 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.854302 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.870338 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-c26qd-config-dhv69"] Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.954924 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-run\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.954990 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-log-ovn\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.955026 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-run-ovn\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.955105 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f204b314-606c-454b-af05-5568f8f73075-scripts\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: 
\"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.955227 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mn6f\" (UniqueName: \"kubernetes.io/projected/f204b314-606c-454b-af05-5568f8f73075-kube-api-access-2mn6f\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:40 crc kubenswrapper[4932]: I1125 09:07:40.955504 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/f204b314-606c-454b-af05-5568f8f73075-additional-scripts\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.058313 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/f204b314-606c-454b-af05-5568f8f73075-additional-scripts\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.058422 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-run\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.058487 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-log-ovn\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.058513 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-run-ovn\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.058532 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f204b314-606c-454b-af05-5568f8f73075-scripts\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.058565 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mn6f\" (UniqueName: \"kubernetes.io/projected/f204b314-606c-454b-af05-5568f8f73075-kube-api-access-2mn6f\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.058787 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-run\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.058801 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-log-ovn\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.058847 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-run-ovn\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.059232 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/f204b314-606c-454b-af05-5568f8f73075-additional-scripts\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.061404 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f204b314-606c-454b-af05-5568f8f73075-scripts\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.076021 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mn6f\" (UniqueName: \"kubernetes.io/projected/f204b314-606c-454b-af05-5568f8f73075-kube-api-access-2mn6f\") pod \"ovn-controller-c26qd-config-dhv69\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:41 crc kubenswrapper[4932]: I1125 09:07:41.181385 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:43 crc kubenswrapper[4932]: I1125 09:07:43.802350 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:43 crc kubenswrapper[4932]: I1125 09:07:43.821037 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift\") pod \"swift-storage-0\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " pod="openstack/swift-storage-0" Nov 25 09:07:44 crc kubenswrapper[4932]: I1125 09:07:44.080738 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 25 09:07:45 crc kubenswrapper[4932]: I1125 09:07:45.555570 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-c26qd" podUID="b15edfd7-749d-45a4-9801-1eba98d77a5e" containerName="ovn-controller" probeResult="failure" output=< Nov 25 09:07:45 crc kubenswrapper[4932]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 09:07:45 crc kubenswrapper[4932]: > Nov 25 09:07:45 crc kubenswrapper[4932]: I1125 09:07:45.964519 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.233442 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-rbz2d"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.236422 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-rbz2d" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.247217 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ch62\" (UniqueName: \"kubernetes.io/projected/717d7a0f-3f58-404d-8ffc-c95f75ebd799-kube-api-access-8ch62\") pod \"barbican-db-create-rbz2d\" (UID: \"717d7a0f-3f58-404d-8ffc-c95f75ebd799\") " pod="openstack/barbican-db-create-rbz2d" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.247317 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/717d7a0f-3f58-404d-8ffc-c95f75ebd799-operator-scripts\") pod \"barbican-db-create-rbz2d\" (UID: \"717d7a0f-3f58-404d-8ffc-c95f75ebd799\") " pod="openstack/barbican-db-create-rbz2d" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.269247 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-rbz2d"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.348770 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ch62\" (UniqueName: \"kubernetes.io/projected/717d7a0f-3f58-404d-8ffc-c95f75ebd799-kube-api-access-8ch62\") pod \"barbican-db-create-rbz2d\" (UID: \"717d7a0f-3f58-404d-8ffc-c95f75ebd799\") " pod="openstack/barbican-db-create-rbz2d" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.348858 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/717d7a0f-3f58-404d-8ffc-c95f75ebd799-operator-scripts\") pod \"barbican-db-create-rbz2d\" (UID: \"717d7a0f-3f58-404d-8ffc-c95f75ebd799\") " pod="openstack/barbican-db-create-rbz2d" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.349709 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-8b4d-account-create-j47jt"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.349953 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/717d7a0f-3f58-404d-8ffc-c95f75ebd799-operator-scripts\") pod \"barbican-db-create-rbz2d\" (UID: \"717d7a0f-3f58-404d-8ffc-c95f75ebd799\") " pod="openstack/barbican-db-create-rbz2d" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.350882 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-8b4d-account-create-j47jt" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.355837 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.366108 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-8b4d-account-create-j47jt"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.371115 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.395856 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ch62\" (UniqueName: \"kubernetes.io/projected/717d7a0f-3f58-404d-8ffc-c95f75ebd799-kube-api-access-8ch62\") pod \"barbican-db-create-rbz2d\" (UID: \"717d7a0f-3f58-404d-8ffc-c95f75ebd799\") " pod="openstack/barbican-db-create-rbz2d" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.451206 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58597054-bb2c-440e-88cc-7f969e6ee0bb-operator-scripts\") pod \"barbican-8b4d-account-create-j47jt\" (UID: \"58597054-bb2c-440e-88cc-7f969e6ee0bb\") " pod="openstack/barbican-8b4d-account-create-j47jt" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.451486 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlprz\" (UniqueName: \"kubernetes.io/projected/58597054-bb2c-440e-88cc-7f969e6ee0bb-kube-api-access-rlprz\") pod \"barbican-8b4d-account-create-j47jt\" (UID: \"58597054-bb2c-440e-88cc-7f969e6ee0bb\") " pod="openstack/barbican-8b4d-account-create-j47jt" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.477225 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-29cl8"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.478996 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-29cl8" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.508152 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-29cl8"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.558225 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58597054-bb2c-440e-88cc-7f969e6ee0bb-operator-scripts\") pod \"barbican-8b4d-account-create-j47jt\" (UID: \"58597054-bb2c-440e-88cc-7f969e6ee0bb\") " pod="openstack/barbican-8b4d-account-create-j47jt" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.558285 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlprz\" (UniqueName: \"kubernetes.io/projected/58597054-bb2c-440e-88cc-7f969e6ee0bb-kube-api-access-rlprz\") pod \"barbican-8b4d-account-create-j47jt\" (UID: \"58597054-bb2c-440e-88cc-7f969e6ee0bb\") " pod="openstack/barbican-8b4d-account-create-j47jt" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.568315 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58597054-bb2c-440e-88cc-7f969e6ee0bb-operator-scripts\") pod \"barbican-8b4d-account-create-j47jt\" (UID: \"58597054-bb2c-440e-88cc-7f969e6ee0bb\") " pod="openstack/barbican-8b4d-account-create-j47jt" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.573033 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-71a7-account-create-tg6zv"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.574398 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-71a7-account-create-tg6zv" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.578717 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.598214 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-71a7-account-create-tg6zv"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.603142 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-rbz2d" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.632764 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlprz\" (UniqueName: \"kubernetes.io/projected/58597054-bb2c-440e-88cc-7f969e6ee0bb-kube-api-access-rlprz\") pod \"barbican-8b4d-account-create-j47jt\" (UID: \"58597054-bb2c-440e-88cc-7f969e6ee0bb\") " pod="openstack/barbican-8b4d-account-create-j47jt" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.661366 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxx7w\" (UniqueName: \"kubernetes.io/projected/e59aed73-dc42-4763-9c59-075e6206d38a-kube-api-access-kxx7w\") pod \"cinder-db-create-29cl8\" (UID: \"e59aed73-dc42-4763-9c59-075e6206d38a\") " pod="openstack/cinder-db-create-29cl8" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.661782 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e59aed73-dc42-4763-9c59-075e6206d38a-operator-scripts\") pod \"cinder-db-create-29cl8\" (UID: \"e59aed73-dc42-4763-9c59-075e6206d38a\") " pod="openstack/cinder-db-create-29cl8" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.671885 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-8b4d-account-create-j47jt" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.760224 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-dqk99"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.761487 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-dqk99" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.764902 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/948cc941-e4e1-4f79-80e1-c3a9594314fc-operator-scripts\") pod \"cinder-71a7-account-create-tg6zv\" (UID: \"948cc941-e4e1-4f79-80e1-c3a9594314fc\") " pod="openstack/cinder-71a7-account-create-tg6zv" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.764950 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxx7w\" (UniqueName: \"kubernetes.io/projected/e59aed73-dc42-4763-9c59-075e6206d38a-kube-api-access-kxx7w\") pod \"cinder-db-create-29cl8\" (UID: \"e59aed73-dc42-4763-9c59-075e6206d38a\") " pod="openstack/cinder-db-create-29cl8" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.765002 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqg5p\" (UniqueName: \"kubernetes.io/projected/948cc941-e4e1-4f79-80e1-c3a9594314fc-kube-api-access-mqg5p\") pod \"cinder-71a7-account-create-tg6zv\" (UID: \"948cc941-e4e1-4f79-80e1-c3a9594314fc\") " pod="openstack/cinder-71a7-account-create-tg6zv" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.765173 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e59aed73-dc42-4763-9c59-075e6206d38a-operator-scripts\") pod \"cinder-db-create-29cl8\" (UID: \"e59aed73-dc42-4763-9c59-075e6206d38a\") " pod="openstack/cinder-db-create-29cl8" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.766757 4932 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"keystone" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.766956 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.767166 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-rgvcq" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.767353 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.767912 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e59aed73-dc42-4763-9c59-075e6206d38a-operator-scripts\") pod \"cinder-db-create-29cl8\" (UID: \"e59aed73-dc42-4763-9c59-075e6206d38a\") " pod="openstack/cinder-db-create-29cl8" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.768410 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-dqk99"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.779256 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-m77q8"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.780334 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-m77q8" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.789266 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-4147-account-create-dgzdd"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.807018 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxx7w\" (UniqueName: \"kubernetes.io/projected/e59aed73-dc42-4763-9c59-075e6206d38a-kube-api-access-kxx7w\") pod \"cinder-db-create-29cl8\" (UID: \"e59aed73-dc42-4763-9c59-075e6206d38a\") " pod="openstack/cinder-db-create-29cl8" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.819310 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-29cl8" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.824091 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-4147-account-create-dgzdd" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.826888 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.842758 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-m77q8"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.868228 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-combined-ca-bundle\") pod \"keystone-db-sync-dqk99\" (UID: \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\") " pod="openstack/keystone-db-sync-dqk99" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.869219 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/948cc941-e4e1-4f79-80e1-c3a9594314fc-operator-scripts\") pod \"cinder-71a7-account-create-tg6zv\" (UID: \"948cc941-e4e1-4f79-80e1-c3a9594314fc\") " pod="openstack/cinder-71a7-account-create-tg6zv" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.869179 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/948cc941-e4e1-4f79-80e1-c3a9594314fc-operator-scripts\") pod \"cinder-71a7-account-create-tg6zv\" (UID: \"948cc941-e4e1-4f79-80e1-c3a9594314fc\") " pod="openstack/cinder-71a7-account-create-tg6zv" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.869513 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xgh8\" (UniqueName: \"kubernetes.io/projected/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-kube-api-access-4xgh8\") pod \"keystone-db-sync-dqk99\" (UID: \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\") " pod="openstack/keystone-db-sync-dqk99" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.869580 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqg5p\" (UniqueName: \"kubernetes.io/projected/948cc941-e4e1-4f79-80e1-c3a9594314fc-kube-api-access-mqg5p\") pod \"cinder-71a7-account-create-tg6zv\" (UID: \"948cc941-e4e1-4f79-80e1-c3a9594314fc\") " pod="openstack/cinder-71a7-account-create-tg6zv" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.869620 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-config-data\") pod \"keystone-db-sync-dqk99\" (UID: \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\") " pod="openstack/keystone-db-sync-dqk99" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.888258 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4147-account-create-dgzdd"] Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.910952 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqg5p\" (UniqueName: \"kubernetes.io/projected/948cc941-e4e1-4f79-80e1-c3a9594314fc-kube-api-access-mqg5p\") pod \"cinder-71a7-account-create-tg6zv\" (UID: \"948cc941-e4e1-4f79-80e1-c3a9594314fc\") " pod="openstack/cinder-71a7-account-create-tg6zv" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.971378 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/62178c03-bbfb-4b80-b594-3507a4563e0b-operator-scripts\") pod \"neutron-db-create-m77q8\" (UID: \"62178c03-bbfb-4b80-b594-3507a4563e0b\") " pod="openstack/neutron-db-create-m77q8" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.971657 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c205eefb-e35b-43b4-8288-a96280db4b43-operator-scripts\") pod \"neutron-4147-account-create-dgzdd\" (UID: \"c205eefb-e35b-43b4-8288-a96280db4b43\") " pod="openstack/neutron-4147-account-create-dgzdd" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.971780 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xgh8\" (UniqueName: \"kubernetes.io/projected/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-kube-api-access-4xgh8\") pod \"keystone-db-sync-dqk99\" (UID: \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\") " pod="openstack/keystone-db-sync-dqk99" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.971831 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-config-data\") pod \"keystone-db-sync-dqk99\" (UID: \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\") " pod="openstack/keystone-db-sync-dqk99" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.971869 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l5jw\" (UniqueName: \"kubernetes.io/projected/c205eefb-e35b-43b4-8288-a96280db4b43-kube-api-access-7l5jw\") pod \"neutron-4147-account-create-dgzdd\" (UID: \"c205eefb-e35b-43b4-8288-a96280db4b43\") " pod="openstack/neutron-4147-account-create-dgzdd" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.971894 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvvkb\" (UniqueName: \"kubernetes.io/projected/62178c03-bbfb-4b80-b594-3507a4563e0b-kube-api-access-zvvkb\") pod \"neutron-db-create-m77q8\" (UID: \"62178c03-bbfb-4b80-b594-3507a4563e0b\") " pod="openstack/neutron-db-create-m77q8" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.971921 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-combined-ca-bundle\") pod \"keystone-db-sync-dqk99\" (UID: \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\") " pod="openstack/keystone-db-sync-dqk99" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.976467 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-combined-ca-bundle\") pod \"keystone-db-sync-dqk99\" (UID: \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\") " pod="openstack/keystone-db-sync-dqk99" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.980093 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-config-data\") pod \"keystone-db-sync-dqk99\" (UID: \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\") " pod="openstack/keystone-db-sync-dqk99" Nov 25 09:07:46 crc kubenswrapper[4932]: I1125 09:07:46.997431 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xgh8\" (UniqueName: 
\"kubernetes.io/projected/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-kube-api-access-4xgh8\") pod \"keystone-db-sync-dqk99\" (UID: \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\") " pod="openstack/keystone-db-sync-dqk99" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.073644 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62178c03-bbfb-4b80-b594-3507a4563e0b-operator-scripts\") pod \"neutron-db-create-m77q8\" (UID: \"62178c03-bbfb-4b80-b594-3507a4563e0b\") " pod="openstack/neutron-db-create-m77q8" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.073733 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c205eefb-e35b-43b4-8288-a96280db4b43-operator-scripts\") pod \"neutron-4147-account-create-dgzdd\" (UID: \"c205eefb-e35b-43b4-8288-a96280db4b43\") " pod="openstack/neutron-4147-account-create-dgzdd" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.073877 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l5jw\" (UniqueName: \"kubernetes.io/projected/c205eefb-e35b-43b4-8288-a96280db4b43-kube-api-access-7l5jw\") pod \"neutron-4147-account-create-dgzdd\" (UID: \"c205eefb-e35b-43b4-8288-a96280db4b43\") " pod="openstack/neutron-4147-account-create-dgzdd" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.073895 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvvkb\" (UniqueName: \"kubernetes.io/projected/62178c03-bbfb-4b80-b594-3507a4563e0b-kube-api-access-zvvkb\") pod \"neutron-db-create-m77q8\" (UID: \"62178c03-bbfb-4b80-b594-3507a4563e0b\") " pod="openstack/neutron-db-create-m77q8" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.075138 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62178c03-bbfb-4b80-b594-3507a4563e0b-operator-scripts\") pod \"neutron-db-create-m77q8\" (UID: \"62178c03-bbfb-4b80-b594-3507a4563e0b\") " pod="openstack/neutron-db-create-m77q8" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.075768 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c205eefb-e35b-43b4-8288-a96280db4b43-operator-scripts\") pod \"neutron-4147-account-create-dgzdd\" (UID: \"c205eefb-e35b-43b4-8288-a96280db4b43\") " pod="openstack/neutron-4147-account-create-dgzdd" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.089584 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-dqk99" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.097325 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l5jw\" (UniqueName: \"kubernetes.io/projected/c205eefb-e35b-43b4-8288-a96280db4b43-kube-api-access-7l5jw\") pod \"neutron-4147-account-create-dgzdd\" (UID: \"c205eefb-e35b-43b4-8288-a96280db4b43\") " pod="openstack/neutron-4147-account-create-dgzdd" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.097905 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvvkb\" (UniqueName: \"kubernetes.io/projected/62178c03-bbfb-4b80-b594-3507a4563e0b-kube-api-access-zvvkb\") pod \"neutron-db-create-m77q8\" (UID: \"62178c03-bbfb-4b80-b594-3507a4563e0b\") " pod="openstack/neutron-db-create-m77q8" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.201057 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-71a7-account-create-tg6zv" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.218812 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-m77q8" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.255500 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4147-account-create-dgzdd" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.334930 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-rbz2d"] Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.338728 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-c26qd-config-dhv69"] Nov 25 09:07:47 crc kubenswrapper[4932]: W1125 09:07:47.354808 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod717d7a0f_3f58_404d_8ffc_c95f75ebd799.slice/crio-25a199652cc346a2f6f736dcf33511d645c718a0266ac7cda17488a8f4a68640 WatchSource:0}: Error finding container 25a199652cc346a2f6f736dcf33511d645c718a0266ac7cda17488a8f4a68640: Status 404 returned error can't find the container with id 25a199652cc346a2f6f736dcf33511d645c718a0266ac7cda17488a8f4a68640 Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.471943 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-dqk99"] Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.509890 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-8b4d-account-create-j47jt"] Nov 25 09:07:47 crc kubenswrapper[4932]: W1125 09:07:47.517636 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd068cda9_60f0_4802_ae8c_bbb4bb9ac33e.slice/crio-94082d3a227cf71533b583d64cb2fafb87b3f0eaa7f9b4e442dde16df51ec5e6 WatchSource:0}: Error finding container 94082d3a227cf71533b583d64cb2fafb87b3f0eaa7f9b4e442dde16df51ec5e6: Status 404 returned error can't find the container with id 94082d3a227cf71533b583d64cb2fafb87b3f0eaa7f9b4e442dde16df51ec5e6 Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.565986 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-29cl8"] Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.597346 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-8b4d-account-create-j47jt" 
event={"ID":"58597054-bb2c-440e-88cc-7f969e6ee0bb","Type":"ContainerStarted","Data":"52cedcbdb124cf0e5f6e6537f269a0d0153f9307aac71cac89028ff484dc97a8"} Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.599675 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c26qd-config-dhv69" event={"ID":"f204b314-606c-454b-af05-5568f8f73075","Type":"ContainerStarted","Data":"a10a5608ab78bf3b26372c544acbb7d24d6382e2f69d35f3e6e060911a08561d"} Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.606917 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-dqk99" event={"ID":"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e","Type":"ContainerStarted","Data":"94082d3a227cf71533b583d64cb2fafb87b3f0eaa7f9b4e442dde16df51ec5e6"} Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.608892 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rbz2d" event={"ID":"717d7a0f-3f58-404d-8ffc-c95f75ebd799","Type":"ContainerStarted","Data":"25a199652cc346a2f6f736dcf33511d645c718a0266ac7cda17488a8f4a68640"} Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.610330 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c9d818a0-17fd-44a2-8855-a6f847efe274","Type":"ContainerStarted","Data":"2cf6819b94d62fccf47fe857c4bedbcb3672a422e4bfda3c5103104951af3ed6"} Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.611319 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 25 09:07:47 crc kubenswrapper[4932]: W1125 09:07:47.618058 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode59aed73_dc42_4763_9c59_075e6206d38a.slice/crio-c3fd101937e2f451d7e80c66cbc489f9ad806966d4af2768489a768d013c8ba0 WatchSource:0}: Error finding container c3fd101937e2f451d7e80c66cbc489f9ad806966d4af2768489a768d013c8ba0: Status 404 returned error can't find the container with id c3fd101937e2f451d7e80c66cbc489f9ad806966d4af2768489a768d013c8ba0 Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.660446 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=1.965295666 podStartE2EDuration="37.660424839s" podCreationTimestamp="2025-11-25 09:07:10 +0000 UTC" firstStartedPulling="2025-11-25 09:07:10.97683231 +0000 UTC m=+1091.102861873" lastFinishedPulling="2025-11-25 09:07:46.671961483 +0000 UTC m=+1126.797991046" observedRunningTime="2025-11-25 09:07:47.652758234 +0000 UTC m=+1127.778787797" watchObservedRunningTime="2025-11-25 09:07:47.660424839 +0000 UTC m=+1127.786454402" Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.687771 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 25 09:07:47 crc kubenswrapper[4932]: I1125 09:07:47.917024 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4147-account-create-dgzdd"] Nov 25 09:07:47 crc kubenswrapper[4932]: W1125 09:07:47.921379 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc205eefb_e35b_43b4_8288_a96280db4b43.slice/crio-98102bae981d1c93090dfb7e94ae89eea020d125114156380c299600df4ecc21 WatchSource:0}: Error finding container 98102bae981d1c93090dfb7e94ae89eea020d125114156380c299600df4ecc21: Status 404 returned error can't find the container with id 
98102bae981d1c93090dfb7e94ae89eea020d125114156380c299600df4ecc21 Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.012085 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-71a7-account-create-tg6zv"] Nov 25 09:07:48 crc kubenswrapper[4932]: W1125 09:07:48.024620 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod948cc941_e4e1_4f79_80e1_c3a9594314fc.slice/crio-b38cb85c43c84d678337d24220180ae63a2c516f758b449a0d108418bfbccafc WatchSource:0}: Error finding container b38cb85c43c84d678337d24220180ae63a2c516f758b449a0d108418bfbccafc: Status 404 returned error can't find the container with id b38cb85c43c84d678337d24220180ae63a2c516f758b449a0d108418bfbccafc Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.047790 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-m77q8"] Nov 25 09:07:48 crc kubenswrapper[4932]: W1125 09:07:48.053995 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62178c03_bbfb_4b80_b594_3507a4563e0b.slice/crio-49274f1da6d58613110fa96803876a0ab7901547c95aca4489e38ccbaa4123ee WatchSource:0}: Error finding container 49274f1da6d58613110fa96803876a0ab7901547c95aca4489e38ccbaa4123ee: Status 404 returned error can't find the container with id 49274f1da6d58613110fa96803876a0ab7901547c95aca4489e38ccbaa4123ee Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.622451 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-8b4d-account-create-j47jt" event={"ID":"58597054-bb2c-440e-88cc-7f969e6ee0bb","Type":"ContainerStarted","Data":"81ce3f95c5fe6793c19ca6b7421368b5e4668cff6c46e8b0b4ca229de7c578d4"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.625860 4932 generic.go:334] "Generic (PLEG): container finished" podID="f204b314-606c-454b-af05-5568f8f73075" containerID="880948a1911a933720439d209f58cb43b934a39763a09f23f4e4d68f382e39f0" exitCode=0 Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.626007 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c26qd-config-dhv69" event={"ID":"f204b314-606c-454b-af05-5568f8f73075","Type":"ContainerDied","Data":"880948a1911a933720439d209f58cb43b934a39763a09f23f4e4d68f382e39f0"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.627244 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-29cl8" event={"ID":"e59aed73-dc42-4763-9c59-075e6206d38a","Type":"ContainerStarted","Data":"6cc06260f5de9f1255bd24848bb0fbd27b365ac50fad917fa272ef82e787fc83"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.627304 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-29cl8" event={"ID":"e59aed73-dc42-4763-9c59-075e6206d38a","Type":"ContainerStarted","Data":"c3fd101937e2f451d7e80c66cbc489f9ad806966d4af2768489a768d013c8ba0"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.629212 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4147-account-create-dgzdd" event={"ID":"c205eefb-e35b-43b4-8288-a96280db4b43","Type":"ContainerStarted","Data":"bc3a346b5f0cec5224a51e30b5104fc851f6b8aab105dbefbcf8a761ba43f86d"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.629234 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4147-account-create-dgzdd" 
event={"ID":"c205eefb-e35b-43b4-8288-a96280db4b43","Type":"ContainerStarted","Data":"98102bae981d1c93090dfb7e94ae89eea020d125114156380c299600df4ecc21"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.630493 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rbz2d" event={"ID":"717d7a0f-3f58-404d-8ffc-c95f75ebd799","Type":"ContainerStarted","Data":"b0df38b4f04c201155a87a3a640ea8f360a9303ed174d877947a925eca63845d"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.631932 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-m77q8" event={"ID":"62178c03-bbfb-4b80-b594-3507a4563e0b","Type":"ContainerStarted","Data":"59addd419d69845cf7dbc0a57d1607ea8827c861a9211b90e147c4ea470215f1"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.631962 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-m77q8" event={"ID":"62178c03-bbfb-4b80-b594-3507a4563e0b","Type":"ContainerStarted","Data":"49274f1da6d58613110fa96803876a0ab7901547c95aca4489e38ccbaa4123ee"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.633829 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-sqgkm" event={"ID":"ee8b5a64-5144-4fd9-a7b0-b12d318ababa","Type":"ContainerStarted","Data":"3f08ff77620cbdd809125bb2f2203d8655b5c5662687eebc59d5533391ed8c1a"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.635522 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"c81103b32956702122f1da404fdef6de8f7bedd7bcba09ad78af06eea03a4c2e"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.638724 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-71a7-account-create-tg6zv" event={"ID":"948cc941-e4e1-4f79-80e1-c3a9594314fc","Type":"ContainerStarted","Data":"e8736a0e8dcdf7ceb436e66a1a1a5deb43accae216dccd68fd9dabbd866226f7"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.638772 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-71a7-account-create-tg6zv" event={"ID":"948cc941-e4e1-4f79-80e1-c3a9594314fc","Type":"ContainerStarted","Data":"b38cb85c43c84d678337d24220180ae63a2c516f758b449a0d108418bfbccafc"} Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.657005 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-8b4d-account-create-j47jt" podStartSLOduration=2.656969 podStartE2EDuration="2.656969s" podCreationTimestamp="2025-11-25 09:07:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:07:48.652685995 +0000 UTC m=+1128.778715578" watchObservedRunningTime="2025-11-25 09:07:48.656969 +0000 UTC m=+1128.782998573" Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.669729 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-71a7-account-create-tg6zv" podStartSLOduration=2.669708043 podStartE2EDuration="2.669708043s" podCreationTimestamp="2025-11-25 09:07:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:07:48.667605701 +0000 UTC m=+1128.793635274" watchObservedRunningTime="2025-11-25 09:07:48.669708043 +0000 UTC m=+1128.795737616" Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.733822 4932 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-sqgkm" podStartSLOduration=3.206905576 podStartE2EDuration="14.733805858s" podCreationTimestamp="2025-11-25 09:07:34 +0000 UTC" firstStartedPulling="2025-11-25 09:07:35.191398267 +0000 UTC m=+1115.317427830" lastFinishedPulling="2025-11-25 09:07:46.718298549 +0000 UTC m=+1126.844328112" observedRunningTime="2025-11-25 09:07:48.723645521 +0000 UTC m=+1128.849675084" watchObservedRunningTime="2025-11-25 09:07:48.733805858 +0000 UTC m=+1128.859835421" Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.750691 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-4147-account-create-dgzdd" podStartSLOduration=2.7506731909999997 podStartE2EDuration="2.750673191s" podCreationTimestamp="2025-11-25 09:07:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:07:48.748776946 +0000 UTC m=+1128.874806539" watchObservedRunningTime="2025-11-25 09:07:48.750673191 +0000 UTC m=+1128.876702754" Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.775474 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-rbz2d" podStartSLOduration=2.775449176 podStartE2EDuration="2.775449176s" podCreationTimestamp="2025-11-25 09:07:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:07:48.771232583 +0000 UTC m=+1128.897262156" watchObservedRunningTime="2025-11-25 09:07:48.775449176 +0000 UTC m=+1128.901478729" Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.790585 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-29cl8" podStartSLOduration=2.790563938 podStartE2EDuration="2.790563938s" podCreationTimestamp="2025-11-25 09:07:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:07:48.787850459 +0000 UTC m=+1128.913880042" watchObservedRunningTime="2025-11-25 09:07:48.790563938 +0000 UTC m=+1128.916593501" Nov 25 09:07:48 crc kubenswrapper[4932]: I1125 09:07:48.804865 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-m77q8" podStartSLOduration=2.8048456059999998 podStartE2EDuration="2.804845606s" podCreationTimestamp="2025-11-25 09:07:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:07:48.803027013 +0000 UTC m=+1128.929056576" watchObservedRunningTime="2025-11-25 09:07:48.804845606 +0000 UTC m=+1128.930875169" Nov 25 09:07:49 crc kubenswrapper[4932]: I1125 09:07:49.646953 4932 generic.go:334] "Generic (PLEG): container finished" podID="e59aed73-dc42-4763-9c59-075e6206d38a" containerID="6cc06260f5de9f1255bd24848bb0fbd27b365ac50fad917fa272ef82e787fc83" exitCode=0 Nov 25 09:07:49 crc kubenswrapper[4932]: I1125 09:07:49.647029 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-29cl8" event={"ID":"e59aed73-dc42-4763-9c59-075e6206d38a","Type":"ContainerDied","Data":"6cc06260f5de9f1255bd24848bb0fbd27b365ac50fad917fa272ef82e787fc83"} Nov 25 09:07:49 crc kubenswrapper[4932]: I1125 09:07:49.649267 4932 generic.go:334] "Generic (PLEG): container finished" 
podID="c205eefb-e35b-43b4-8288-a96280db4b43" containerID="bc3a346b5f0cec5224a51e30b5104fc851f6b8aab105dbefbcf8a761ba43f86d" exitCode=0 Nov 25 09:07:49 crc kubenswrapper[4932]: I1125 09:07:49.649351 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4147-account-create-dgzdd" event={"ID":"c205eefb-e35b-43b4-8288-a96280db4b43","Type":"ContainerDied","Data":"bc3a346b5f0cec5224a51e30b5104fc851f6b8aab105dbefbcf8a761ba43f86d"} Nov 25 09:07:49 crc kubenswrapper[4932]: I1125 09:07:49.651615 4932 generic.go:334] "Generic (PLEG): container finished" podID="717d7a0f-3f58-404d-8ffc-c95f75ebd799" containerID="b0df38b4f04c201155a87a3a640ea8f360a9303ed174d877947a925eca63845d" exitCode=0 Nov 25 09:07:49 crc kubenswrapper[4932]: I1125 09:07:49.651676 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rbz2d" event={"ID":"717d7a0f-3f58-404d-8ffc-c95f75ebd799","Type":"ContainerDied","Data":"b0df38b4f04c201155a87a3a640ea8f360a9303ed174d877947a925eca63845d"} Nov 25 09:07:49 crc kubenswrapper[4932]: I1125 09:07:49.653759 4932 generic.go:334] "Generic (PLEG): container finished" podID="58597054-bb2c-440e-88cc-7f969e6ee0bb" containerID="81ce3f95c5fe6793c19ca6b7421368b5e4668cff6c46e8b0b4ca229de7c578d4" exitCode=0 Nov 25 09:07:49 crc kubenswrapper[4932]: I1125 09:07:49.653846 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-8b4d-account-create-j47jt" event={"ID":"58597054-bb2c-440e-88cc-7f969e6ee0bb","Type":"ContainerDied","Data":"81ce3f95c5fe6793c19ca6b7421368b5e4668cff6c46e8b0b4ca229de7c578d4"} Nov 25 09:07:49 crc kubenswrapper[4932]: I1125 09:07:49.655871 4932 generic.go:334] "Generic (PLEG): container finished" podID="948cc941-e4e1-4f79-80e1-c3a9594314fc" containerID="e8736a0e8dcdf7ceb436e66a1a1a5deb43accae216dccd68fd9dabbd866226f7" exitCode=0 Nov 25 09:07:49 crc kubenswrapper[4932]: I1125 09:07:49.655933 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-71a7-account-create-tg6zv" event={"ID":"948cc941-e4e1-4f79-80e1-c3a9594314fc","Type":"ContainerDied","Data":"e8736a0e8dcdf7ceb436e66a1a1a5deb43accae216dccd68fd9dabbd866226f7"} Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.359462 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.464840 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mn6f\" (UniqueName: \"kubernetes.io/projected/f204b314-606c-454b-af05-5568f8f73075-kube-api-access-2mn6f\") pod \"f204b314-606c-454b-af05-5568f8f73075\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.465213 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-log-ovn\") pod \"f204b314-606c-454b-af05-5568f8f73075\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.465262 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-run\") pod \"f204b314-606c-454b-af05-5568f8f73075\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.465549 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f204b314-606c-454b-af05-5568f8f73075-scripts\") pod \"f204b314-606c-454b-af05-5568f8f73075\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.465585 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-run-ovn\") pod \"f204b314-606c-454b-af05-5568f8f73075\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.465815 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/f204b314-606c-454b-af05-5568f8f73075-additional-scripts\") pod \"f204b314-606c-454b-af05-5568f8f73075\" (UID: \"f204b314-606c-454b-af05-5568f8f73075\") " Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.466008 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-run" (OuterVolumeSpecName: "var-run") pod "f204b314-606c-454b-af05-5568f8f73075" (UID: "f204b314-606c-454b-af05-5568f8f73075"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.466060 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "f204b314-606c-454b-af05-5568f8f73075" (UID: "f204b314-606c-454b-af05-5568f8f73075"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.466603 4932 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.466621 4932 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.466648 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "f204b314-606c-454b-af05-5568f8f73075" (UID: "f204b314-606c-454b-af05-5568f8f73075"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.466893 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f204b314-606c-454b-af05-5568f8f73075-scripts" (OuterVolumeSpecName: "scripts") pod "f204b314-606c-454b-af05-5568f8f73075" (UID: "f204b314-606c-454b-af05-5568f8f73075"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.467483 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f204b314-606c-454b-af05-5568f8f73075-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "f204b314-606c-454b-af05-5568f8f73075" (UID: "f204b314-606c-454b-af05-5568f8f73075"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.470554 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f204b314-606c-454b-af05-5568f8f73075-kube-api-access-2mn6f" (OuterVolumeSpecName: "kube-api-access-2mn6f") pod "f204b314-606c-454b-af05-5568f8f73075" (UID: "f204b314-606c-454b-af05-5568f8f73075"). InnerVolumeSpecName "kube-api-access-2mn6f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.546839 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-c26qd" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.569393 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f204b314-606c-454b-af05-5568f8f73075-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.569431 4932 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/f204b314-606c-454b-af05-5568f8f73075-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.569443 4932 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/f204b314-606c-454b-af05-5568f8f73075-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.569458 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mn6f\" (UniqueName: \"kubernetes.io/projected/f204b314-606c-454b-af05-5568f8f73075-kube-api-access-2mn6f\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.664839 4932 generic.go:334] "Generic (PLEG): container finished" podID="62178c03-bbfb-4b80-b594-3507a4563e0b" containerID="59addd419d69845cf7dbc0a57d1607ea8827c861a9211b90e147c4ea470215f1" exitCode=0 Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.664923 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-m77q8" event={"ID":"62178c03-bbfb-4b80-b594-3507a4563e0b","Type":"ContainerDied","Data":"59addd419d69845cf7dbc0a57d1607ea8827c861a9211b90e147c4ea470215f1"} Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.667452 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c26qd-config-dhv69" event={"ID":"f204b314-606c-454b-af05-5568f8f73075","Type":"ContainerDied","Data":"a10a5608ab78bf3b26372c544acbb7d24d6382e2f69d35f3e6e060911a08561d"} Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.667495 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a10a5608ab78bf3b26372c544acbb7d24d6382e2f69d35f3e6e060911a08561d" Nov 25 09:07:50 crc kubenswrapper[4932]: I1125 09:07:50.667536 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-c26qd-config-dhv69" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.453414 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-c26qd-config-dhv69"] Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.461636 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-c26qd-config-dhv69"] Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.554763 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-c26qd-config-4ps55"] Nov 25 09:07:51 crc kubenswrapper[4932]: E1125 09:07:51.555083 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f204b314-606c-454b-af05-5568f8f73075" containerName="ovn-config" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.555098 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f204b314-606c-454b-af05-5568f8f73075" containerName="ovn-config" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.555270 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f204b314-606c-454b-af05-5568f8f73075" containerName="ovn-config" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.555775 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.558033 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.631390 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-c26qd-config-4ps55"] Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.679863 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-29cl8" event={"ID":"e59aed73-dc42-4763-9c59-075e6206d38a","Type":"ContainerDied","Data":"c3fd101937e2f451d7e80c66cbc489f9ad806966d4af2768489a768d013c8ba0"} Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.679905 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c3fd101937e2f451d7e80c66cbc489f9ad806966d4af2768489a768d013c8ba0" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.681251 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4147-account-create-dgzdd" event={"ID":"c205eefb-e35b-43b4-8288-a96280db4b43","Type":"ContainerDied","Data":"98102bae981d1c93090dfb7e94ae89eea020d125114156380c299600df4ecc21"} Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.681273 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="98102bae981d1c93090dfb7e94ae89eea020d125114156380c299600df4ecc21" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.682339 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-rbz2d" event={"ID":"717d7a0f-3f58-404d-8ffc-c95f75ebd799","Type":"ContainerDied","Data":"25a199652cc346a2f6f736dcf33511d645c718a0266ac7cda17488a8f4a68640"} Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.682357 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25a199652cc346a2f6f736dcf33511d645c718a0266ac7cda17488a8f4a68640" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.683400 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-8b4d-account-create-j47jt" 
event={"ID":"58597054-bb2c-440e-88cc-7f969e6ee0bb","Type":"ContainerDied","Data":"52cedcbdb124cf0e5f6e6537f269a0d0153f9307aac71cac89028ff484dc97a8"} Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.683419 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52cedcbdb124cf0e5f6e6537f269a0d0153f9307aac71cac89028ff484dc97a8" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.684671 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-71a7-account-create-tg6zv" event={"ID":"948cc941-e4e1-4f79-80e1-c3a9594314fc","Type":"ContainerDied","Data":"b38cb85c43c84d678337d24220180ae63a2c516f758b449a0d108418bfbccafc"} Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.684691 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b38cb85c43c84d678337d24220180ae63a2c516f758b449a0d108418bfbccafc" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.690042 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhs4q\" (UniqueName: \"kubernetes.io/projected/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-kube-api-access-rhs4q\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.690126 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-run\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.690178 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-scripts\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.690247 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-run-ovn\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.690377 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-additional-scripts\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.690408 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-log-ovn\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.758266 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-4147-account-create-dgzdd" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.765443 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-rbz2d" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.772029 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-71a7-account-create-tg6zv" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.781645 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-29cl8" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.790114 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-8b4d-account-create-j47jt" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.792851 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-scripts\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.792902 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-run-ovn\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.792955 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-additional-scripts\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.792974 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-log-ovn\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.793028 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhs4q\" (UniqueName: \"kubernetes.io/projected/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-kube-api-access-rhs4q\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.793055 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-run\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.795824 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-log-ovn\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " 
pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.795875 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-run-ovn\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.796115 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-scripts\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.796328 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-additional-scripts\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.796348 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-run\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.819573 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhs4q\" (UniqueName: \"kubernetes.io/projected/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-kube-api-access-rhs4q\") pod \"ovn-controller-c26qd-config-4ps55\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.894226 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.894438 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxx7w\" (UniqueName: \"kubernetes.io/projected/e59aed73-dc42-4763-9c59-075e6206d38a-kube-api-access-kxx7w\") pod \"e59aed73-dc42-4763-9c59-075e6206d38a\" (UID: \"e59aed73-dc42-4763-9c59-075e6206d38a\") " Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.894529 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/717d7a0f-3f58-404d-8ffc-c95f75ebd799-operator-scripts\") pod \"717d7a0f-3f58-404d-8ffc-c95f75ebd799\" (UID: \"717d7a0f-3f58-404d-8ffc-c95f75ebd799\") " Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.894578 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqg5p\" (UniqueName: \"kubernetes.io/projected/948cc941-e4e1-4f79-80e1-c3a9594314fc-kube-api-access-mqg5p\") pod \"948cc941-e4e1-4f79-80e1-c3a9594314fc\" (UID: \"948cc941-e4e1-4f79-80e1-c3a9594314fc\") " Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.894603 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/948cc941-e4e1-4f79-80e1-c3a9594314fc-operator-scripts\") pod \"948cc941-e4e1-4f79-80e1-c3a9594314fc\" (UID: \"948cc941-e4e1-4f79-80e1-c3a9594314fc\") " Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.894647 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rlprz\" (UniqueName: \"kubernetes.io/projected/58597054-bb2c-440e-88cc-7f969e6ee0bb-kube-api-access-rlprz\") pod \"58597054-bb2c-440e-88cc-7f969e6ee0bb\" (UID: \"58597054-bb2c-440e-88cc-7f969e6ee0bb\") " Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.894669 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7l5jw\" (UniqueName: \"kubernetes.io/projected/c205eefb-e35b-43b4-8288-a96280db4b43-kube-api-access-7l5jw\") pod \"c205eefb-e35b-43b4-8288-a96280db4b43\" (UID: \"c205eefb-e35b-43b4-8288-a96280db4b43\") " Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.894717 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ch62\" (UniqueName: \"kubernetes.io/projected/717d7a0f-3f58-404d-8ffc-c95f75ebd799-kube-api-access-8ch62\") pod \"717d7a0f-3f58-404d-8ffc-c95f75ebd799\" (UID: \"717d7a0f-3f58-404d-8ffc-c95f75ebd799\") " Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.894750 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e59aed73-dc42-4763-9c59-075e6206d38a-operator-scripts\") pod \"e59aed73-dc42-4763-9c59-075e6206d38a\" (UID: \"e59aed73-dc42-4763-9c59-075e6206d38a\") " Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.894768 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58597054-bb2c-440e-88cc-7f969e6ee0bb-operator-scripts\") pod \"58597054-bb2c-440e-88cc-7f969e6ee0bb\" (UID: \"58597054-bb2c-440e-88cc-7f969e6ee0bb\") " Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.894800 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/c205eefb-e35b-43b4-8288-a96280db4b43-operator-scripts\") pod \"c205eefb-e35b-43b4-8288-a96280db4b43\" (UID: \"c205eefb-e35b-43b4-8288-a96280db4b43\") " Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.895762 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c205eefb-e35b-43b4-8288-a96280db4b43-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c205eefb-e35b-43b4-8288-a96280db4b43" (UID: "c205eefb-e35b-43b4-8288-a96280db4b43"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.895796 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e59aed73-dc42-4763-9c59-075e6206d38a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e59aed73-dc42-4763-9c59-075e6206d38a" (UID: "e59aed73-dc42-4763-9c59-075e6206d38a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.896174 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58597054-bb2c-440e-88cc-7f969e6ee0bb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "58597054-bb2c-440e-88cc-7f969e6ee0bb" (UID: "58597054-bb2c-440e-88cc-7f969e6ee0bb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.896301 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/717d7a0f-3f58-404d-8ffc-c95f75ebd799-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "717d7a0f-3f58-404d-8ffc-c95f75ebd799" (UID: "717d7a0f-3f58-404d-8ffc-c95f75ebd799"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.896653 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/948cc941-e4e1-4f79-80e1-c3a9594314fc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "948cc941-e4e1-4f79-80e1-c3a9594314fc" (UID: "948cc941-e4e1-4f79-80e1-c3a9594314fc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.900618 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58597054-bb2c-440e-88cc-7f969e6ee0bb-kube-api-access-rlprz" (OuterVolumeSpecName: "kube-api-access-rlprz") pod "58597054-bb2c-440e-88cc-7f969e6ee0bb" (UID: "58597054-bb2c-440e-88cc-7f969e6ee0bb"). InnerVolumeSpecName "kube-api-access-rlprz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.902630 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/948cc941-e4e1-4f79-80e1-c3a9594314fc-kube-api-access-mqg5p" (OuterVolumeSpecName: "kube-api-access-mqg5p") pod "948cc941-e4e1-4f79-80e1-c3a9594314fc" (UID: "948cc941-e4e1-4f79-80e1-c3a9594314fc"). InnerVolumeSpecName "kube-api-access-mqg5p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.902725 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c205eefb-e35b-43b4-8288-a96280db4b43-kube-api-access-7l5jw" (OuterVolumeSpecName: "kube-api-access-7l5jw") pod "c205eefb-e35b-43b4-8288-a96280db4b43" (UID: "c205eefb-e35b-43b4-8288-a96280db4b43"). InnerVolumeSpecName "kube-api-access-7l5jw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.916069 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/717d7a0f-3f58-404d-8ffc-c95f75ebd799-kube-api-access-8ch62" (OuterVolumeSpecName: "kube-api-access-8ch62") pod "717d7a0f-3f58-404d-8ffc-c95f75ebd799" (UID: "717d7a0f-3f58-404d-8ffc-c95f75ebd799"). InnerVolumeSpecName "kube-api-access-8ch62". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:51 crc kubenswrapper[4932]: I1125 09:07:51.916123 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e59aed73-dc42-4763-9c59-075e6206d38a-kube-api-access-kxx7w" (OuterVolumeSpecName: "kube-api-access-kxx7w") pod "e59aed73-dc42-4763-9c59-075e6206d38a" (UID: "e59aed73-dc42-4763-9c59-075e6206d38a"). InnerVolumeSpecName "kube-api-access-kxx7w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.036749 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rlprz\" (UniqueName: \"kubernetes.io/projected/58597054-bb2c-440e-88cc-7f969e6ee0bb-kube-api-access-rlprz\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.036788 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7l5jw\" (UniqueName: \"kubernetes.io/projected/c205eefb-e35b-43b4-8288-a96280db4b43-kube-api-access-7l5jw\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.036797 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ch62\" (UniqueName: \"kubernetes.io/projected/717d7a0f-3f58-404d-8ffc-c95f75ebd799-kube-api-access-8ch62\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.036807 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e59aed73-dc42-4763-9c59-075e6206d38a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.036817 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/58597054-bb2c-440e-88cc-7f969e6ee0bb-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.036826 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c205eefb-e35b-43b4-8288-a96280db4b43-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.036834 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxx7w\" (UniqueName: \"kubernetes.io/projected/e59aed73-dc42-4763-9c59-075e6206d38a-kube-api-access-kxx7w\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.036842 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/717d7a0f-3f58-404d-8ffc-c95f75ebd799-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.036850 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqg5p\" (UniqueName: \"kubernetes.io/projected/948cc941-e4e1-4f79-80e1-c3a9594314fc-kube-api-access-mqg5p\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.036859 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/948cc941-e4e1-4f79-80e1-c3a9594314fc-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.616131 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f204b314-606c-454b-af05-5568f8f73075" path="/var/lib/kubelet/pods/f204b314-606c-454b-af05-5568f8f73075/volumes" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.693915 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4147-account-create-dgzdd" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.693946 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-8b4d-account-create-j47jt" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.693992 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-71a7-account-create-tg6zv" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.694052 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-29cl8" Nov 25 09:07:52 crc kubenswrapper[4932]: I1125 09:07:52.694096 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-rbz2d" Nov 25 09:07:56 crc kubenswrapper[4932]: I1125 09:07:56.361074 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-m77q8" Nov 25 09:07:56 crc kubenswrapper[4932]: I1125 09:07:56.410515 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvvkb\" (UniqueName: \"kubernetes.io/projected/62178c03-bbfb-4b80-b594-3507a4563e0b-kube-api-access-zvvkb\") pod \"62178c03-bbfb-4b80-b594-3507a4563e0b\" (UID: \"62178c03-bbfb-4b80-b594-3507a4563e0b\") " Nov 25 09:07:56 crc kubenswrapper[4932]: I1125 09:07:56.410565 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62178c03-bbfb-4b80-b594-3507a4563e0b-operator-scripts\") pod \"62178c03-bbfb-4b80-b594-3507a4563e0b\" (UID: \"62178c03-bbfb-4b80-b594-3507a4563e0b\") " Nov 25 09:07:56 crc kubenswrapper[4932]: I1125 09:07:56.411438 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62178c03-bbfb-4b80-b594-3507a4563e0b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "62178c03-bbfb-4b80-b594-3507a4563e0b" (UID: "62178c03-bbfb-4b80-b594-3507a4563e0b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:07:56 crc kubenswrapper[4932]: I1125 09:07:56.416665 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62178c03-bbfb-4b80-b594-3507a4563e0b-kube-api-access-zvvkb" (OuterVolumeSpecName: "kube-api-access-zvvkb") pod "62178c03-bbfb-4b80-b594-3507a4563e0b" (UID: "62178c03-bbfb-4b80-b594-3507a4563e0b"). InnerVolumeSpecName "kube-api-access-zvvkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:07:56 crc kubenswrapper[4932]: I1125 09:07:56.512314 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvvkb\" (UniqueName: \"kubernetes.io/projected/62178c03-bbfb-4b80-b594-3507a4563e0b-kube-api-access-zvvkb\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:56 crc kubenswrapper[4932]: I1125 09:07:56.512554 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62178c03-bbfb-4b80-b594-3507a4563e0b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:07:56 crc kubenswrapper[4932]: I1125 09:07:56.632920 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-c26qd-config-4ps55"] Nov 25 09:07:56 crc kubenswrapper[4932]: I1125 09:07:56.735665 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c26qd-config-4ps55" event={"ID":"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2","Type":"ContainerStarted","Data":"c17bd13ea762604e7643bed94086e1008b7dafc456fa327b4716564d14c2102a"} Nov 25 09:07:56 crc kubenswrapper[4932]: I1125 09:07:56.737220 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-m77q8" event={"ID":"62178c03-bbfb-4b80-b594-3507a4563e0b","Type":"ContainerDied","Data":"49274f1da6d58613110fa96803876a0ab7901547c95aca4489e38ccbaa4123ee"} Nov 25 09:07:56 crc kubenswrapper[4932]: I1125 09:07:56.737247 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49274f1da6d58613110fa96803876a0ab7901547c95aca4489e38ccbaa4123ee" Nov 25 09:07:56 crc kubenswrapper[4932]: I1125 09:07:56.737261 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-m77q8" Nov 25 09:07:57 crc kubenswrapper[4932]: I1125 09:07:57.754098 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"a5d5ffb7d109b7b5eac7b58236b7451c24c1c17dca222fe9b3d425e19a748cf5"} Nov 25 09:07:57 crc kubenswrapper[4932]: I1125 09:07:57.754486 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"00edaf8b62c16ee50bbf819b1838d3ce3fd0a27605f5823b0347afa99c531c70"} Nov 25 09:07:57 crc kubenswrapper[4932]: I1125 09:07:57.763859 4932 generic.go:334] "Generic (PLEG): container finished" podID="08127ddc-f26b-434f-86a7-6ad1f7e7d0b2" containerID="271dbf2fd905420d2a8bdde0013d1a75c5748c6abb55b77de54d92992a396a5e" exitCode=0 Nov 25 09:07:57 crc kubenswrapper[4932]: I1125 09:07:57.763919 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c26qd-config-4ps55" event={"ID":"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2","Type":"ContainerDied","Data":"271dbf2fd905420d2a8bdde0013d1a75c5748c6abb55b77de54d92992a396a5e"} Nov 25 09:07:58 crc kubenswrapper[4932]: I1125 09:07:58.774024 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"8d2c29a0a166c6ebbf9113b41e4e2ba9e248c36be93456a40622f2d5fcc2066e"} Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.631300 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.865919 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989251 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-additional-scripts\") pod \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989324 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-log-ovn\") pod \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989417 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-run-ovn\") pod \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989457 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-run\") pod \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989460 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "08127ddc-f26b-434f-86a7-6ad1f7e7d0b2" (UID: "08127ddc-f26b-434f-86a7-6ad1f7e7d0b2"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989495 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "08127ddc-f26b-434f-86a7-6ad1f7e7d0b2" (UID: "08127ddc-f26b-434f-86a7-6ad1f7e7d0b2"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989499 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-scripts\") pod \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989511 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-run" (OuterVolumeSpecName: "var-run") pod "08127ddc-f26b-434f-86a7-6ad1f7e7d0b2" (UID: "08127ddc-f26b-434f-86a7-6ad1f7e7d0b2"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989570 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhs4q\" (UniqueName: \"kubernetes.io/projected/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-kube-api-access-rhs4q\") pod \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\" (UID: \"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2\") " Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989681 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "08127ddc-f26b-434f-86a7-6ad1f7e7d0b2" (UID: "08127ddc-f26b-434f-86a7-6ad1f7e7d0b2"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989958 4932 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989977 4932 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.989988 4932 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.990001 4932 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.990268 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-scripts" (OuterVolumeSpecName: "scripts") pod "08127ddc-f26b-434f-86a7-6ad1f7e7d0b2" (UID: "08127ddc-f26b-434f-86a7-6ad1f7e7d0b2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:00.995611 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-kube-api-access-rhs4q" (OuterVolumeSpecName: "kube-api-access-rhs4q") pod "08127ddc-f26b-434f-86a7-6ad1f7e7d0b2" (UID: "08127ddc-f26b-434f-86a7-6ad1f7e7d0b2"). InnerVolumeSpecName "kube-api-access-rhs4q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:01.091895 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:01.091931 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhs4q\" (UniqueName: \"kubernetes.io/projected/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2-kube-api-access-rhs4q\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:01.799265 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c26qd-config-4ps55" event={"ID":"08127ddc-f26b-434f-86a7-6ad1f7e7d0b2","Type":"ContainerDied","Data":"c17bd13ea762604e7643bed94086e1008b7dafc456fa327b4716564d14c2102a"} Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:01.799550 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c17bd13ea762604e7643bed94086e1008b7dafc456fa327b4716564d14c2102a" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:01.799618 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c26qd-config-4ps55" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:01.947060 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-c26qd-config-4ps55"] Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:01.955211 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-c26qd-config-4ps55"] Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:02.614769 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08127ddc-f26b-434f-86a7-6ad1f7e7d0b2" path="/var/lib/kubelet/pods/08127ddc-f26b-434f-86a7-6ad1f7e7d0b2/volumes" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:02.808982 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"a45a7147f788b51504a339167827aa53fdb2e4a2d35f004cd41d1718e61f00a0"} Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:02.810626 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-dqk99" event={"ID":"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e","Type":"ContainerStarted","Data":"ddc3cfbc12b56e6bea03e56f92c9f19e040a8c2c6c86835eb76d2890eeb52bb3"} Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:07.181184 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:07.181627 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:07.181662 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:07.182257 4932 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"96dca2522fb61785e671095f31f8032a7eb4d218c261ece3b2cefa1b8cd2013b"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:07.182302 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://96dca2522fb61785e671095f31f8032a7eb4d218c261ece3b2cefa1b8cd2013b" gracePeriod=600 Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:07.602347 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="metallb-system/frr-k8s-2vpc5" podUID="90887994-5f04-4a8a-abd7-6e6e6d1240f4" containerName="frr" probeResult="failure" output="Get \"http://127.0.0.1:7573/livez\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:08.861681 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="96dca2522fb61785e671095f31f8032a7eb4d218c261ece3b2cefa1b8cd2013b" exitCode=0 Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:08.861765 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"96dca2522fb61785e671095f31f8032a7eb4d218c261ece3b2cefa1b8cd2013b"} Nov 25 09:08:18 crc kubenswrapper[4932]: I1125 09:08:08.862021 4932 scope.go:117] "RemoveContainer" containerID="91eb2e40d6f72fe209b50e6c986a543f1a5accc33bb0098951f158439d3b5195" Nov 25 09:08:18 crc kubenswrapper[4932]: E1125 09:08:18.970108 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75" Nov 25 09:08:18 crc kubenswrapper[4932]: E1125 09:08:18.970672 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:container-server,Image:quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75,Command:[/usr/bin/swift-container-server /etc/swift/container-server.conf.d 
-v],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:container,HostPort:0,ContainerPort:6201,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5b7h56h9dh94h67bh697h95h55hbh555h556h675h5fdh57dh579h5fbh64fh5c9h687hb6h678h5d4h549h54h98h8ch564h5bh5bch55dhc8hf8q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:swift,ReadOnly:false,MountPath:/srv/node/pv,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-swift,ReadOnly:false,MountPath:/etc/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cache,ReadOnly:false,MountPath:/var/cache/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:lock,ReadOnly:false,MountPath:/var/lock,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-skkg2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42445,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-storage-0_openstack(81ccee4a-f414-4007-ae17-b440b55dea5f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:08:19 crc kubenswrapper[4932]: I1125 09:08:19.955066 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"647e12679adbf63ee5c63458089dff922023eeb7cd99a634cbd8c2a9db9a0cd7"} Nov 25 09:08:19 crc kubenswrapper[4932]: I1125 09:08:19.975695 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-dqk99" podStartSLOduration=20.200133673 podStartE2EDuration="33.975675983s" podCreationTimestamp="2025-11-25 09:07:46 +0000 UTC" firstStartedPulling="2025-11-25 09:07:47.519810265 +0000 UTC m=+1127.645839828" lastFinishedPulling="2025-11-25 09:08:01.295352575 +0000 UTC m=+1141.421382138" observedRunningTime="2025-11-25 09:08:02.83317562 +0000 UTC m=+1142.959205183" watchObservedRunningTime="2025-11-25 09:08:19.975675983 +0000 UTC m=+1160.101705556" Nov 25 09:08:20 crc kubenswrapper[4932]: I1125 09:08:20.965279 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"5a51256b52321de55e8f314f90f9c0ba2bb1175fd2f75c69c2bef2451a36ec18"} Nov 25 09:08:20 crc kubenswrapper[4932]: I1125 09:08:20.965748 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"0a8e07a6bdc220d1412d97c2b357bdc14500c61e6d93f00a0915d134c315a151"} Nov 25 09:08:20 crc kubenswrapper[4932]: I1125 
09:08:20.965762 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"0e9f1ea09136d57750420bc0ce46abbfd67cd0b1239ce71468be11a57e791720"} Nov 25 09:08:21 crc kubenswrapper[4932]: E1125 09:08:21.710669 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"container-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\"]" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" Nov 25 09:08:21 crc kubenswrapper[4932]: I1125 09:08:21.996532 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"a9a48f9fe27c63900394a2e67fd1df3228736d3ba3410cb4defb99fc16d721f1"} Nov 25 09:08:21 crc kubenswrapper[4932]: I1125 09:08:21.996598 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"78edf79de3cfd571e1fec0bd599680cc34a039f29b8fc703497738f0cf348ad8"} Nov 25 09:08:21 crc kubenswrapper[4932]: I1125 09:08:21.996608 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"07e4106840372eb90e9a0d57a59631587d0bde7ac43d138cc5d5ec8a10885a84"} Nov 25 09:08:21 crc kubenswrapper[4932]: I1125 09:08:21.996616 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"06eca31abe1c59ab7a8cb701e4a49245773b3f648c29cdbe3b219489466d8705"} Nov 25 09:08:22 crc kubenswrapper[4932]: I1125 09:08:22.003761 4932 generic.go:334] "Generic (PLEG): container finished" podID="d068cda9-60f0-4802-ae8c-bbb4bb9ac33e" containerID="ddc3cfbc12b56e6bea03e56f92c9f19e040a8c2c6c86835eb76d2890eeb52bb3" exitCode=0 Nov 25 09:08:22 crc kubenswrapper[4932]: I1125 09:08:22.003814 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-dqk99" event={"ID":"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e","Type":"ContainerDied","Data":"ddc3cfbc12b56e6bea03e56f92c9f19e040a8c2c6c86835eb76d2890eeb52bb3"} Nov 25 09:08:22 crc kubenswrapper[4932]: E1125 09:08:22.008512 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-replicator\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\"]" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" Nov 25 09:08:23 crc kubenswrapper[4932]: E1125 09:08:23.016378 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\"]" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" Nov 25 09:08:23 crc kubenswrapper[4932]: I1125 09:08:23.263656 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-dqk99" Nov 25 09:08:23 crc kubenswrapper[4932]: I1125 09:08:23.379582 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-combined-ca-bundle\") pod \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\" (UID: \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\") " Nov 25 09:08:23 crc kubenswrapper[4932]: I1125 09:08:23.379673 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4xgh8\" (UniqueName: \"kubernetes.io/projected/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-kube-api-access-4xgh8\") pod \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\" (UID: \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\") " Nov 25 09:08:23 crc kubenswrapper[4932]: I1125 09:08:23.379707 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-config-data\") pod \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\" (UID: \"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e\") " Nov 25 09:08:23 crc kubenswrapper[4932]: I1125 09:08:23.385076 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-kube-api-access-4xgh8" (OuterVolumeSpecName: "kube-api-access-4xgh8") pod "d068cda9-60f0-4802-ae8c-bbb4bb9ac33e" (UID: "d068cda9-60f0-4802-ae8c-bbb4bb9ac33e"). InnerVolumeSpecName "kube-api-access-4xgh8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:08:23 crc kubenswrapper[4932]: I1125 09:08:23.403384 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d068cda9-60f0-4802-ae8c-bbb4bb9ac33e" (UID: "d068cda9-60f0-4802-ae8c-bbb4bb9ac33e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:23 crc kubenswrapper[4932]: I1125 09:08:23.421807 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-config-data" (OuterVolumeSpecName: "config-data") pod "d068cda9-60f0-4802-ae8c-bbb4bb9ac33e" (UID: "d068cda9-60f0-4802-ae8c-bbb4bb9ac33e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:23 crc kubenswrapper[4932]: I1125 09:08:23.481317 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:23 crc kubenswrapper[4932]: I1125 09:08:23.481353 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4xgh8\" (UniqueName: \"kubernetes.io/projected/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-kube-api-access-4xgh8\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:23 crc kubenswrapper[4932]: I1125 09:08:23.481366 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.018319 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-dqk99" event={"ID":"d068cda9-60f0-4802-ae8c-bbb4bb9ac33e","Type":"ContainerDied","Data":"94082d3a227cf71533b583d64cb2fafb87b3f0eaa7f9b4e442dde16df51ec5e6"} Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.018582 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94082d3a227cf71533b583d64cb2fafb87b3f0eaa7f9b4e442dde16df51ec5e6" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.018417 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-dqk99" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.334542 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8645678c8c-82dqz"] Nov 25 09:08:24 crc kubenswrapper[4932]: E1125 09:08:24.334963 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c205eefb-e35b-43b4-8288-a96280db4b43" containerName="mariadb-account-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.334986 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c205eefb-e35b-43b4-8288-a96280db4b43" containerName="mariadb-account-create" Nov 25 09:08:24 crc kubenswrapper[4932]: E1125 09:08:24.335001 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58597054-bb2c-440e-88cc-7f969e6ee0bb" containerName="mariadb-account-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335009 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="58597054-bb2c-440e-88cc-7f969e6ee0bb" containerName="mariadb-account-create" Nov 25 09:08:24 crc kubenswrapper[4932]: E1125 09:08:24.335020 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="717d7a0f-3f58-404d-8ffc-c95f75ebd799" containerName="mariadb-database-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335029 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="717d7a0f-3f58-404d-8ffc-c95f75ebd799" containerName="mariadb-database-create" Nov 25 09:08:24 crc kubenswrapper[4932]: E1125 09:08:24.335044 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d068cda9-60f0-4802-ae8c-bbb4bb9ac33e" containerName="keystone-db-sync" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335051 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d068cda9-60f0-4802-ae8c-bbb4bb9ac33e" containerName="keystone-db-sync" Nov 25 09:08:24 crc kubenswrapper[4932]: E1125 09:08:24.335064 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62178c03-bbfb-4b80-b594-3507a4563e0b" 
containerName="mariadb-database-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335073 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="62178c03-bbfb-4b80-b594-3507a4563e0b" containerName="mariadb-database-create" Nov 25 09:08:24 crc kubenswrapper[4932]: E1125 09:08:24.335094 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="948cc941-e4e1-4f79-80e1-c3a9594314fc" containerName="mariadb-account-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335102 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="948cc941-e4e1-4f79-80e1-c3a9594314fc" containerName="mariadb-account-create" Nov 25 09:08:24 crc kubenswrapper[4932]: E1125 09:08:24.335119 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08127ddc-f26b-434f-86a7-6ad1f7e7d0b2" containerName="ovn-config" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335126 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="08127ddc-f26b-434f-86a7-6ad1f7e7d0b2" containerName="ovn-config" Nov 25 09:08:24 crc kubenswrapper[4932]: E1125 09:08:24.335136 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e59aed73-dc42-4763-9c59-075e6206d38a" containerName="mariadb-database-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335143 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e59aed73-dc42-4763-9c59-075e6206d38a" containerName="mariadb-database-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335413 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d068cda9-60f0-4802-ae8c-bbb4bb9ac33e" containerName="keystone-db-sync" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335433 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="717d7a0f-3f58-404d-8ffc-c95f75ebd799" containerName="mariadb-database-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335445 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c205eefb-e35b-43b4-8288-a96280db4b43" containerName="mariadb-account-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335470 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="62178c03-bbfb-4b80-b594-3507a4563e0b" containerName="mariadb-database-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335487 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="58597054-bb2c-440e-88cc-7f969e6ee0bb" containerName="mariadb-account-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335503 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="08127ddc-f26b-434f-86a7-6ad1f7e7d0b2" containerName="ovn-config" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335516 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="948cc941-e4e1-4f79-80e1-c3a9594314fc" containerName="mariadb-account-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.335526 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e59aed73-dc42-4763-9c59-075e6206d38a" containerName="mariadb-database-create" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.336576 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.349459 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8645678c8c-82dqz"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.381340 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-k7fzg"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.382653 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.390983 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.391097 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.391432 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.391724 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.391009 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-rgvcq" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.414134 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-k7fzg"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.503344 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rtq9\" (UniqueName: \"kubernetes.io/projected/5ab5e673-3e6f-4980-bec0-497b78924861-kube-api-access-8rtq9\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.503478 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-config\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.503510 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-combined-ca-bundle\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.503785 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-ovsdbserver-sb\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.503863 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tlv5\" (UniqueName: \"kubernetes.io/projected/edfa0291-1c8e-4744-be99-9674a93b10d2-kube-api-access-9tlv5\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " 
pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.503963 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-ovsdbserver-nb\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.504040 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-dns-svc\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.504113 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-scripts\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.504329 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-fernet-keys\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.504414 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-credential-keys\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.504490 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-config-data\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.538322 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-lc5vk"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.540226 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.546424 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.546424 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.546897 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-v9kjf" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.551860 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-lc5vk"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.579938 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.583737 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.586971 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.588409 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.605821 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rtq9\" (UniqueName: \"kubernetes.io/projected/5ab5e673-3e6f-4980-bec0-497b78924861-kube-api-access-8rtq9\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.605885 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-config\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.605914 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-combined-ca-bundle\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.605991 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-ovsdbserver-sb\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.606034 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tlv5\" (UniqueName: \"kubernetes.io/projected/edfa0291-1c8e-4744-be99-9674a93b10d2-kube-api-access-9tlv5\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.606064 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-ovsdbserver-nb\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.606094 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-dns-svc\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.606119 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-scripts\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.606182 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-fernet-keys\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.606229 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-credential-keys\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.606256 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-config-data\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.607688 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-config\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.608077 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-ovsdbserver-nb\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.609642 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-ovsdbserver-sb\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.611795 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-dns-svc\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " 
pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.628397 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-fernet-keys\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.629423 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.629870 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-config-data\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.629943 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-scripts\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.632838 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-combined-ca-bundle\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.638419 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-p579r"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.639658 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-p579r" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.642224 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-credential-keys\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.645561 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tlv5\" (UniqueName: \"kubernetes.io/projected/edfa0291-1c8e-4744-be99-9674a93b10d2-kube-api-access-9tlv5\") pod \"dnsmasq-dns-8645678c8c-82dqz\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") " pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.645738 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.645976 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-8jckn" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.646344 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rtq9\" (UniqueName: \"kubernetes.io/projected/5ab5e673-3e6f-4980-bec0-497b78924861-kube-api-access-8rtq9\") pod \"keystone-bootstrap-k7fzg\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.667710 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8645678c8c-82dqz" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.708362 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-p579r"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.717448 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.718000 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-combined-ca-bundle\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.718062 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-config-data\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.718091 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-etc-machine-id\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.718133 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfwws\" (UniqueName: \"kubernetes.io/projected/9bb5dd5d-4c94-434e-880d-f47d84a21724-kube-api-access-bfwws\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.718287 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.718312 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bb5dd5d-4c94-434e-880d-f47d84a21724-log-httpd\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.718344 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tljhf\" (UniqueName: \"kubernetes.io/projected/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-kube-api-access-tljhf\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.718377 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-scripts\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.718407 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 
09:08:24.718425 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-scripts\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.718456 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-config-data\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.718489 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-db-sync-config-data\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.718512 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bb5dd5d-4c94-434e-880d-f47d84a21724-run-httpd\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.755206 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-k6hqv"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.756564 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.761559 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.766235 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-qg8rj" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.766479 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.821819 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-k6hqv"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822098 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f0aa1af-46c3-4583-9140-149dddf9b048-combined-ca-bundle\") pod \"barbican-db-sync-p579r\" (UID: \"0f0aa1af-46c3-4583-9140-149dddf9b048\") " pod="openstack/barbican-db-sync-p579r" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822142 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-config-data\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822232 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-db-sync-config-data\") pod \"cinder-db-sync-lc5vk\" (UID: 
\"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822260 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bb5dd5d-4c94-434e-880d-f47d84a21724-run-httpd\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822345 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-combined-ca-bundle\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822417 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-config-data\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822467 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-etc-machine-id\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822497 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfwws\" (UniqueName: \"kubernetes.io/projected/9bb5dd5d-4c94-434e-880d-f47d84a21724-kube-api-access-bfwws\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822556 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0f0aa1af-46c3-4583-9140-149dddf9b048-db-sync-config-data\") pod \"barbican-db-sync-p579r\" (UID: \"0f0aa1af-46c3-4583-9140-149dddf9b048\") " pod="openstack/barbican-db-sync-p579r" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822583 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srp5z\" (UniqueName: \"kubernetes.io/projected/0f0aa1af-46c3-4583-9140-149dddf9b048-kube-api-access-srp5z\") pod \"barbican-db-sync-p579r\" (UID: \"0f0aa1af-46c3-4583-9140-149dddf9b048\") " pod="openstack/barbican-db-sync-p579r" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822695 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822731 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bb5dd5d-4c94-434e-880d-f47d84a21724-log-httpd\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822793 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-tljhf\" (UniqueName: \"kubernetes.io/projected/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-kube-api-access-tljhf\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822821 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-scripts\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822879 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-scripts\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.822933 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.824516 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bb5dd5d-4c94-434e-880d-f47d84a21724-run-httpd\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.825672 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bb5dd5d-4c94-434e-880d-f47d84a21724-log-httpd\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.832928 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-scripts\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.833975 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-config-data\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.834246 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-scripts\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.834282 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-config-data\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.834313 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-etc-machine-id\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.837410 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.837961 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-combined-ca-bundle\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.851169 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.851497 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-db-sync-config-data\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.860519 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfwws\" (UniqueName: \"kubernetes.io/projected/9bb5dd5d-4c94-434e-880d-f47d84a21724-kube-api-access-bfwws\") pod \"ceilometer-0\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.860596 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8645678c8c-82dqz"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.862708 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tljhf\" (UniqueName: \"kubernetes.io/projected/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-kube-api-access-tljhf\") pod \"cinder-db-sync-lc5vk\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.866922 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-gqn2r"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.872467 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.874897 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.875088 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-m2ssv" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.884340 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-gqn2r"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.901542 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7d985fb7-srqqc"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.903519 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.918896 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.918983 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.924912 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-combined-ca-bundle\") pod \"neutron-db-sync-k6hqv\" (UID: \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\") " pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.924978 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgds5\" (UniqueName: \"kubernetes.io/projected/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-kube-api-access-pgds5\") pod \"neutron-db-sync-k6hqv\" (UID: \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\") " pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.925024 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f0aa1af-46c3-4583-9140-149dddf9b048-combined-ca-bundle\") pod \"barbican-db-sync-p579r\" (UID: \"0f0aa1af-46c3-4583-9140-149dddf9b048\") " pod="openstack/barbican-db-sync-p579r" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.925076 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-config\") pod \"neutron-db-sync-k6hqv\" (UID: \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\") " pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.925157 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0f0aa1af-46c3-4583-9140-149dddf9b048-db-sync-config-data\") pod \"barbican-db-sync-p579r\" (UID: \"0f0aa1af-46c3-4583-9140-149dddf9b048\") " pod="openstack/barbican-db-sync-p579r" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.925179 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srp5z\" (UniqueName: \"kubernetes.io/projected/0f0aa1af-46c3-4583-9140-149dddf9b048-kube-api-access-srp5z\") pod \"barbican-db-sync-p579r\" (UID: \"0f0aa1af-46c3-4583-9140-149dddf9b048\") " 
pod="openstack/barbican-db-sync-p579r" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.950523 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srp5z\" (UniqueName: \"kubernetes.io/projected/0f0aa1af-46c3-4583-9140-149dddf9b048-kube-api-access-srp5z\") pod \"barbican-db-sync-p579r\" (UID: \"0f0aa1af-46c3-4583-9140-149dddf9b048\") " pod="openstack/barbican-db-sync-p579r" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.950817 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7d985fb7-srqqc"] Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.955998 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0f0aa1af-46c3-4583-9140-149dddf9b048-db-sync-config-data\") pod \"barbican-db-sync-p579r\" (UID: \"0f0aa1af-46c3-4583-9140-149dddf9b048\") " pod="openstack/barbican-db-sync-p579r" Nov 25 09:08:24 crc kubenswrapper[4932]: I1125 09:08:24.960993 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f0aa1af-46c3-4583-9140-149dddf9b048-combined-ca-bundle\") pod \"barbican-db-sync-p579r\" (UID: \"0f0aa1af-46c3-4583-9140-149dddf9b048\") " pod="openstack/barbican-db-sync-p579r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.037624 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-combined-ca-bundle\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.037670 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-config\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.037707 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.037750 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhqlb\" (UniqueName: \"kubernetes.io/projected/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-kube-api-access-zhqlb\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.037775 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpmkk\" (UniqueName: \"kubernetes.io/projected/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-kube-api-access-jpmkk\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.040094 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-combined-ca-bundle\") pod \"neutron-db-sync-k6hqv\" (UID: \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\") " pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.040185 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgds5\" (UniqueName: \"kubernetes.io/projected/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-kube-api-access-pgds5\") pod \"neutron-db-sync-k6hqv\" (UID: \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\") " pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.040237 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.040266 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-scripts\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.040376 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-config\") pod \"neutron-db-sync-k6hqv\" (UID: \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\") " pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.040425 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-dns-svc\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.040468 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-logs\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.040531 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-config-data\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.044811 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-combined-ca-bundle\") pod \"neutron-db-sync-k6hqv\" (UID: \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\") " pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.045549 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-config\") pod \"neutron-db-sync-k6hqv\" (UID: 
\"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\") " pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.051320 4932 generic.go:334] "Generic (PLEG): container finished" podID="ee8b5a64-5144-4fd9-a7b0-b12d318ababa" containerID="3f08ff77620cbdd809125bb2f2203d8655b5c5662687eebc59d5533391ed8c1a" exitCode=0 Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.051389 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-sqgkm" event={"ID":"ee8b5a64-5144-4fd9-a7b0-b12d318ababa","Type":"ContainerDied","Data":"3f08ff77620cbdd809125bb2f2203d8655b5c5662687eebc59d5533391ed8c1a"} Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.057667 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgds5\" (UniqueName: \"kubernetes.io/projected/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-kube-api-access-pgds5\") pod \"neutron-db-sync-k6hqv\" (UID: \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\") " pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.117496 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-p579r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.141931 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhqlb\" (UniqueName: \"kubernetes.io/projected/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-kube-api-access-zhqlb\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.141983 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpmkk\" (UniqueName: \"kubernetes.io/projected/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-kube-api-access-jpmkk\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.142054 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.142081 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-scripts\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.142145 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-dns-svc\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.142173 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-logs\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.142222 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-config-data\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.142259 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-combined-ca-bundle\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.142278 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-config\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.142308 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.143052 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.144071 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-logs\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.145139 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-dns-svc\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.146589 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-config\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.146609 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.146668 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-scripts\") pod 
\"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.147692 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-config-data\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.154802 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-combined-ca-bundle\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.157660 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.164821 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpmkk\" (UniqueName: \"kubernetes.io/projected/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-kube-api-access-jpmkk\") pod \"placement-db-sync-gqn2r\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.165955 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhqlb\" (UniqueName: \"kubernetes.io/projected/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-kube-api-access-zhqlb\") pod \"dnsmasq-dns-5c7d985fb7-srqqc\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") " pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.233999 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.251523 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-gqn2r" Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.264342 4932 util.go:30] "No sandbox for pod can be found. 
Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.351282 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8645678c8c-82dqz"]
Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.432505 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-k7fzg"]
Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.499936 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 09:08:25 crc kubenswrapper[4932]: W1125 09:08:25.563164 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9bb5dd5d_4c94_434e_880d_f47d84a21724.slice/crio-18e2abf1d4004e7d41a67c4d81e029de1b01e115126e8fa57418e6d53bbe2fca WatchSource:0}: Error finding container 18e2abf1d4004e7d41a67c4d81e029de1b01e115126e8fa57418e6d53bbe2fca: Status 404 returned error can't find the container with id 18e2abf1d4004e7d41a67c4d81e029de1b01e115126e8fa57418e6d53bbe2fca
Nov 25 09:08:25 crc kubenswrapper[4932]: W1125 09:08:25.572897 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5ab5e673_3e6f_4980_bec0_497b78924861.slice/crio-367fa860c314ec9a105e47d41dfadf85968ca9a1566acc00e280c22b09109c8c WatchSource:0}: Error finding container 367fa860c314ec9a105e47d41dfadf85968ca9a1566acc00e280c22b09109c8c: Status 404 returned error can't find the container with id 367fa860c314ec9a105e47d41dfadf85968ca9a1566acc00e280c22b09109c8c
Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.668127 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-lc5vk"]
Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.693748 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-p579r"]
Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.819365 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-k6hqv"]
Nov 25 09:08:25 crc kubenswrapper[4932]: W1125 09:08:25.823539 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode12148ff_1b2e_4c34_85c0_ca43747a2eb4.slice/crio-63cef85d075ed786b0d7093447d9525278c47ddd5f5b8887aa8b3d69eb0534ca WatchSource:0}: Error finding container 63cef85d075ed786b0d7093447d9525278c47ddd5f5b8887aa8b3d69eb0534ca: Status 404 returned error can't find the container with id 63cef85d075ed786b0d7093447d9525278c47ddd5f5b8887aa8b3d69eb0534ca
Nov 25 09:08:25 crc kubenswrapper[4932]: I1125 09:08:25.985844 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7d985fb7-srqqc"]
Nov 25 09:08:26 crc kubenswrapper[4932]: W1125 09:08:26.002642 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod618f7c4b_5a2d_4eea_ab0e_e8d9aacc95e0.slice/crio-a387dd1dac98b7b2485061e6163e5d75dec19fd947ba58eaa114fa5bb79d0a11 WatchSource:0}: Error finding container a387dd1dac98b7b2485061e6163e5d75dec19fd947ba58eaa114fa5bb79d0a11: Status 404 returned error can't find the container with id a387dd1dac98b7b2485061e6163e5d75dec19fd947ba58eaa114fa5bb79d0a11
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.054454 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-gqn2r"]
Nov 25 09:08:26 crc kubenswrapper[4932]: W1125 09:08:26.057720 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4a545d2_ff3c_4a27_b210_4803cdbf3c86.slice/crio-ff2553a1d7dcb8a83bdf4f7ff058dc1daaa305d6794ac9410999fd741cc9a681 WatchSource:0}: Error finding container ff2553a1d7dcb8a83bdf4f7ff058dc1daaa305d6794ac9410999fd741cc9a681: Status 404 returned error can't find the container with id ff2553a1d7dcb8a83bdf4f7ff058dc1daaa305d6794ac9410999fd741cc9a681
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.061749 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-p579r" event={"ID":"0f0aa1af-46c3-4583-9140-149dddf9b048","Type":"ContainerStarted","Data":"f550a4806e0785ad0522b57a70eefee34e9ef89616881712a28263a57fc6975d"}
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.064097 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-k7fzg" event={"ID":"5ab5e673-3e6f-4980-bec0-497b78924861","Type":"ContainerStarted","Data":"367fa860c314ec9a105e47d41dfadf85968ca9a1566acc00e280c22b09109c8c"}
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.065041 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lc5vk" event={"ID":"e14c1b6a-a83b-47fc-8fac-36468c1b4df5","Type":"ContainerStarted","Data":"f67112bfa94da491ace2c4570bb536dd81d65d8e78cfaf5e5b46f24359dba0ce"}
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.066574 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8645678c8c-82dqz" event={"ID":"edfa0291-1c8e-4744-be99-9674a93b10d2","Type":"ContainerStarted","Data":"7d5253a4a20468bfaa9a59b7491ef1d8a0edbefca2773c9f6d72009577daef8f"}
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.070786 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" event={"ID":"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0","Type":"ContainerStarted","Data":"a387dd1dac98b7b2485061e6163e5d75dec19fd947ba58eaa114fa5bb79d0a11"}
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.073178 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9bb5dd5d-4c94-434e-880d-f47d84a21724","Type":"ContainerStarted","Data":"18e2abf1d4004e7d41a67c4d81e029de1b01e115126e8fa57418e6d53bbe2fca"}
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.074646 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-k6hqv" event={"ID":"e12148ff-1b2e-4c34-85c0-ca43747a2eb4","Type":"ContainerStarted","Data":"63cef85d075ed786b0d7093447d9525278c47ddd5f5b8887aa8b3d69eb0534ca"}
Nov 25 09:08:26 crc kubenswrapper[4932]: E1125 09:08:26.496638 4932 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podedfa0291_1c8e_4744_be99_9674a93b10d2.slice/crio-conmon-6ec706979ece54d3cdd903b70006c3ea8e1090c06784bee729fcc5c3894b82ad.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podedfa0291_1c8e_4744_be99_9674a93b10d2.slice/crio-6ec706979ece54d3cdd903b70006c3ea8e1090c06784bee729fcc5c3894b82ad.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.628007 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-sqgkm"
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.780817 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqqmz\" (UniqueName: \"kubernetes.io/projected/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-kube-api-access-jqqmz\") pod \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") "
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.780938 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-db-sync-config-data\") pod \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") "
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.780979 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-config-data\") pod \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") "
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.781093 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-combined-ca-bundle\") pod \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\" (UID: \"ee8b5a64-5144-4fd9-a7b0-b12d318ababa\") "
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.789959 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.797003 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-kube-api-access-jqqmz" (OuterVolumeSpecName: "kube-api-access-jqqmz") pod "ee8b5a64-5144-4fd9-a7b0-b12d318ababa" (UID: "ee8b5a64-5144-4fd9-a7b0-b12d318ababa"). InnerVolumeSpecName "kube-api-access-jqqmz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.805843 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ee8b5a64-5144-4fd9-a7b0-b12d318ababa" (UID: "ee8b5a64-5144-4fd9-a7b0-b12d318ababa"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.839636 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-config-data" (OuterVolumeSpecName: "config-data") pod "ee8b5a64-5144-4fd9-a7b0-b12d318ababa" (UID: "ee8b5a64-5144-4fd9-a7b0-b12d318ababa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.885479 4932 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.885507 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.885517 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqqmz\" (UniqueName: \"kubernetes.io/projected/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-kube-api-access-jqqmz\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.896494 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee8b5a64-5144-4fd9-a7b0-b12d318ababa" (UID: "ee8b5a64-5144-4fd9-a7b0-b12d318ababa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:08:26 crc kubenswrapper[4932]: I1125 09:08:26.988550 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee8b5a64-5144-4fd9-a7b0-b12d318ababa-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.102962 4932 generic.go:334] "Generic (PLEG): container finished" podID="618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" containerID="d1d4d6f0c7be1eb38e98b5f5873ba1f5f79feea71dc805b1a7197dad3e8b9719" exitCode=0
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.103079 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" event={"ID":"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0","Type":"ContainerDied","Data":"d1d4d6f0c7be1eb38e98b5f5873ba1f5f79feea71dc805b1a7197dad3e8b9719"}
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.112922 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-sqgkm"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.112952 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-sqgkm" event={"ID":"ee8b5a64-5144-4fd9-a7b0-b12d318ababa","Type":"ContainerDied","Data":"139de3d285e81294dfbc2f10635ac0357bbc9f04a6997419a866d39b32defcc2"}
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.113003 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="139de3d285e81294dfbc2f10635ac0357bbc9f04a6997419a866d39b32defcc2"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.118921 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-k6hqv" event={"ID":"e12148ff-1b2e-4c34-85c0-ca43747a2eb4","Type":"ContainerStarted","Data":"04f68200aa484dc2dc939e490dcf4ff88ff89773a7c654ea9b8bce2bac1b8aaf"}
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.158667 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-k7fzg" event={"ID":"5ab5e673-3e6f-4980-bec0-497b78924861","Type":"ContainerStarted","Data":"0e68c9c73f7ebc342beac473b5b54d359282071ad585cbf463817d807a73bca8"}
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.171903 4932 generic.go:334] "Generic (PLEG): container finished" podID="edfa0291-1c8e-4744-be99-9674a93b10d2" containerID="6ec706979ece54d3cdd903b70006c3ea8e1090c06784bee729fcc5c3894b82ad" exitCode=0
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.172066 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8645678c8c-82dqz" event={"ID":"edfa0291-1c8e-4744-be99-9674a93b10d2","Type":"ContainerDied","Data":"6ec706979ece54d3cdd903b70006c3ea8e1090c06784bee729fcc5c3894b82ad"}
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.175607 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gqn2r" event={"ID":"d4a545d2-ff3c-4a27-b210-4803cdbf3c86","Type":"ContainerStarted","Data":"ff2553a1d7dcb8a83bdf4f7ff058dc1daaa305d6794ac9410999fd741cc9a681"}
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.177050 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-k6hqv" podStartSLOduration=3.177034171 podStartE2EDuration="3.177034171s" podCreationTimestamp="2025-11-25 09:08:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:08:27.162074723 +0000 UTC m=+1167.288104306" watchObservedRunningTime="2025-11-25 09:08:27.177034171 +0000 UTC m=+1167.303063734"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.205277 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-k7fzg" podStartSLOduration=3.205256776 podStartE2EDuration="3.205256776s" podCreationTimestamp="2025-11-25 09:08:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:08:27.184588042 +0000 UTC m=+1167.310617615" watchObservedRunningTime="2025-11-25 09:08:27.205256776 +0000 UTC m=+1167.331286359"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.480339 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7d985fb7-srqqc"]
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.514705 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf79d967-28wbk"]
Nov 25 09:08:27 crc kubenswrapper[4932]: E1125 09:08:27.515080 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee8b5a64-5144-4fd9-a7b0-b12d318ababa" containerName="glance-db-sync"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.515093 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee8b5a64-5144-4fd9-a7b0-b12d318ababa" containerName="glance-db-sync"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.515307 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee8b5a64-5144-4fd9-a7b0-b12d318ababa" containerName="glance-db-sync"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.516134 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.556527 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf79d967-28wbk"]
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.618066 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-ovsdbserver-sb\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.618112 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-dns-svc\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.618141 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-ovsdbserver-nb\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.618166 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5fb9\" (UniqueName: \"kubernetes.io/projected/db6788e6-9095-4e4f-b7d2-0efe04074361-kube-api-access-k5fb9\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.618242 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-config\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.728487 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-config\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.735298 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-config\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.735915 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-ovsdbserver-sb\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.736765 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-dns-svc\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.736852 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-ovsdbserver-sb\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.739717 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-dns-svc\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.744974 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-ovsdbserver-nb\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.745057 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5fb9\" (UniqueName: \"kubernetes.io/projected/db6788e6-9095-4e4f-b7d2-0efe04074361-kube-api-access-k5fb9\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.746577 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-ovsdbserver-nb\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.772743 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5fb9\" (UniqueName: \"kubernetes.io/projected/db6788e6-9095-4e4f-b7d2-0efe04074361-kube-api-access-k5fb9\") pod \"dnsmasq-dns-cf79d967-28wbk\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.777255 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8645678c8c-82dqz"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.855218 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.948333 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-config\") pod \"edfa0291-1c8e-4744-be99-9674a93b10d2\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") "
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.948539 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tlv5\" (UniqueName: \"kubernetes.io/projected/edfa0291-1c8e-4744-be99-9674a93b10d2-kube-api-access-9tlv5\") pod \"edfa0291-1c8e-4744-be99-9674a93b10d2\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") "
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.948576 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-ovsdbserver-sb\") pod \"edfa0291-1c8e-4744-be99-9674a93b10d2\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") "
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.948768 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-ovsdbserver-nb\") pod \"edfa0291-1c8e-4744-be99-9674a93b10d2\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") "
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.948853 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-dns-svc\") pod \"edfa0291-1c8e-4744-be99-9674a93b10d2\" (UID: \"edfa0291-1c8e-4744-be99-9674a93b10d2\") "
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.957487 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edfa0291-1c8e-4744-be99-9674a93b10d2-kube-api-access-9tlv5" (OuterVolumeSpecName: "kube-api-access-9tlv5") pod "edfa0291-1c8e-4744-be99-9674a93b10d2" (UID: "edfa0291-1c8e-4744-be99-9674a93b10d2"). InnerVolumeSpecName "kube-api-access-9tlv5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.975989 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "edfa0291-1c8e-4744-be99-9674a93b10d2" (UID: "edfa0291-1c8e-4744-be99-9674a93b10d2"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.984747 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-config" (OuterVolumeSpecName: "config") pod "edfa0291-1c8e-4744-be99-9674a93b10d2" (UID: "edfa0291-1c8e-4744-be99-9674a93b10d2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.989397 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "edfa0291-1c8e-4744-be99-9674a93b10d2" (UID: "edfa0291-1c8e-4744-be99-9674a93b10d2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:08:27 crc kubenswrapper[4932]: I1125 09:08:27.998624 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "edfa0291-1c8e-4744-be99-9674a93b10d2" (UID: "edfa0291-1c8e-4744-be99-9674a93b10d2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.051120 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tlv5\" (UniqueName: \"kubernetes.io/projected/edfa0291-1c8e-4744-be99-9674a93b10d2-kube-api-access-9tlv5\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.051157 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.051170 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.051197 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.051210 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edfa0291-1c8e-4744-be99-9674a93b10d2-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.199357 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" event={"ID":"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0","Type":"ContainerStarted","Data":"18d0f9e0750606b9f3aac543160d933a446a6cb4679b227dc642ecb5a707dd9f"}
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.200852 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.207246 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8645678c8c-82dqz"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.208097 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8645678c8c-82dqz" event={"ID":"edfa0291-1c8e-4744-be99-9674a93b10d2","Type":"ContainerDied","Data":"7d5253a4a20468bfaa9a59b7491ef1d8a0edbefca2773c9f6d72009577daef8f"}
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.208164 4932 scope.go:117] "RemoveContainer" containerID="6ec706979ece54d3cdd903b70006c3ea8e1090c06784bee729fcc5c3894b82ad"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.233558 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" podStartSLOduration=4.233540637 podStartE2EDuration="4.233540637s" podCreationTimestamp="2025-11-25 09:08:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:08:28.220572297 +0000 UTC m=+1168.346601870" watchObservedRunningTime="2025-11-25 09:08:28.233540637 +0000 UTC m=+1168.359570190"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.325972 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8645678c8c-82dqz"]
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.338317 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8645678c8c-82dqz"]
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.365155 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf79d967-28wbk"]
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.387780 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 09:08:28 crc kubenswrapper[4932]: E1125 09:08:28.388134 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edfa0291-1c8e-4744-be99-9674a93b10d2" containerName="init"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.388150 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="edfa0291-1c8e-4744-be99-9674a93b10d2" containerName="init"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.388398 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="edfa0291-1c8e-4744-be99-9674a93b10d2" containerName="init"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.389374 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.394594 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.394710 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.399483 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-btq26"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.402364 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.560848 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07658137-1e65-459c-b55d-3548a2210b30-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.561411 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.561484 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-config-data\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.561579 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07658137-1e65-459c-b55d-3548a2210b30-logs\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.561640 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mfkv\" (UniqueName: \"kubernetes.io/projected/07658137-1e65-459c-b55d-3548a2210b30-kube-api-access-9mfkv\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.561734 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.561958 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-scripts\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.619256 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edfa0291-1c8e-4744-be99-9674a93b10d2" path="/var/lib/kubelet/pods/edfa0291-1c8e-4744-be99-9674a93b10d2/volumes"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.669861 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-scripts\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.669941 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07658137-1e65-459c-b55d-3548a2210b30-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.669992 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.670018 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-config-data\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.670069 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07658137-1e65-459c-b55d-3548a2210b30-logs\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.670094 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mfkv\" (UniqueName: \"kubernetes.io/projected/07658137-1e65-459c-b55d-3548a2210b30-kube-api-access-9mfkv\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.670140 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.672232 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.673610 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.676064 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.683679 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07658137-1e65-459c-b55d-3548a2210b30-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.688023 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.688121 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07658137-1e65-459c-b55d-3548a2210b30-logs\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.688221 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.690613 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-scripts\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.705996 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-config-data\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.713438 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mfkv\" (UniqueName: \"kubernetes.io/projected/07658137-1e65-459c-b55d-3548a2210b30-kube-api-access-9mfkv\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.737419 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " pod="openstack/glance-default-external-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.745365 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.873765 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eec87a7e-5df8-47d8-8584-81d832f34a02-logs\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.873883 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.874614 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.874649 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-config-data\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.874763 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm4tr\" (UniqueName: \"kubernetes.io/projected/eec87a7e-5df8-47d8-8584-81d832f34a02-kube-api-access-hm4tr\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.874827 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/eec87a7e-5df8-47d8-8584-81d832f34a02-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.874860 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-scripts\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.976664 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/eec87a7e-5df8-47d8-8584-81d832f34a02-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.976725 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-scripts\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.976800 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eec87a7e-5df8-47d8-8584-81d832f34a02-logs\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.976838 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.976912 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.976935 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-config-data\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.976977 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hm4tr\" (UniqueName: \"kubernetes.io/projected/eec87a7e-5df8-47d8-8584-81d832f34a02-kube-api-access-hm4tr\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.977181 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/eec87a7e-5df8-47d8-8584-81d832f34a02-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.977311 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.978161 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eec87a7e-5df8-47d8-8584-81d832f34a02-logs\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.982468 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.982712 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-config-data\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.984821 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-scripts\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:28 crc kubenswrapper[4932]: I1125 09:08:28.993428 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm4tr\" (UniqueName: \"kubernetes.io/projected/eec87a7e-5df8-47d8-8584-81d832f34a02-kube-api-access-hm4tr\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:29 crc kubenswrapper[4932]: I1125 09:08:29.007504 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:29 crc kubenswrapper[4932]: I1125 09:08:29.018730 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 09:08:29 crc kubenswrapper[4932]: I1125 09:08:29.158766 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 09:08:29 crc kubenswrapper[4932]: I1125 09:08:29.240097 4932 generic.go:334] "Generic (PLEG): container finished" podID="db6788e6-9095-4e4f-b7d2-0efe04074361" containerID="3b3421dd62e68ac97bc91ed0787da5646e35ef1814ecacbfc3076ea4e847d0a4" exitCode=0
Nov 25 09:08:29 crc kubenswrapper[4932]: I1125 09:08:29.240497 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" podUID="618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" containerName="dnsmasq-dns" containerID="cri-o://18d0f9e0750606b9f3aac543160d933a446a6cb4679b227dc642ecb5a707dd9f" gracePeriod=10
Nov 25 09:08:29 crc kubenswrapper[4932]: I1125 09:08:29.242523 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf79d967-28wbk" event={"ID":"db6788e6-9095-4e4f-b7d2-0efe04074361","Type":"ContainerDied","Data":"3b3421dd62e68ac97bc91ed0787da5646e35ef1814ecacbfc3076ea4e847d0a4"}
Nov 25 09:08:29 crc kubenswrapper[4932]: I1125 09:08:29.242588 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf79d967-28wbk" event={"ID":"db6788e6-9095-4e4f-b7d2-0efe04074361","Type":"ContainerStarted","Data":"341e8da74be88ed15f39b3fc000721cfe332115eb8445f01e0e5488afbffe374"}
Nov 25 09:08:29 crc kubenswrapper[4932]: I1125 09:08:29.589450 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 09:08:29 crc kubenswrapper[4932]: I1125 09:08:29.808375 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 09:08:30 crc kubenswrapper[4932]: I1125 09:08:30.265360 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf79d967-28wbk" event={"ID":"db6788e6-9095-4e4f-b7d2-0efe04074361","Type":"ContainerStarted","Data":"870f7f3b9f9ce5304a5a4f411f73693726174f2d71c3db4819adbc089c91149f"}
Nov 25 09:08:30 crc kubenswrapper[4932]: I1125 09:08:30.266467 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cf79d967-28wbk"
Nov 25 09:08:30 crc kubenswrapper[4932]: I1125 09:08:30.268522 4932 generic.go:334] "Generic (PLEG): container finished" podID="618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" containerID="18d0f9e0750606b9f3aac543160d933a446a6cb4679b227dc642ecb5a707dd9f" exitCode=0
Nov 25 09:08:30 crc kubenswrapper[4932]: I1125 09:08:30.268566 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" event={"ID":"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0","Type":"ContainerDied","Data":"18d0f9e0750606b9f3aac543160d933a446a6cb4679b227dc642ecb5a707dd9f"}
Nov 25 09:08:30 crc kubenswrapper[4932]: I1125 09:08:30.287389 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cf79d967-28wbk" podStartSLOduration=3.287367909 podStartE2EDuration="3.287367909s" podCreationTimestamp="2025-11-25 09:08:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:08:30.282992284 +0000 UTC m=+1170.409021847" watchObservedRunningTime="2025-11-25 09:08:30.287367909 +0000 UTC m=+1170.413397472"
Nov 25 09:08:31 crc kubenswrapper[4932]: I1125 09:08:31.281298 4932 generic.go:334] "Generic (PLEG): container finished" podID="5ab5e673-3e6f-4980-bec0-497b78924861" containerID="0e68c9c73f7ebc342beac473b5b54d359282071ad585cbf463817d807a73bca8" exitCode=0
Nov 25 09:08:31 crc kubenswrapper[4932]: I1125 09:08:31.281371 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-k7fzg" event={"ID":"5ab5e673-3e6f-4980-bec0-497b78924861","Type":"ContainerDied","Data":"0e68c9c73f7ebc342beac473b5b54d359282071ad585cbf463817d807a73bca8"}
Nov 25 09:08:31 crc kubenswrapper[4932]: W1125 09:08:31.786279 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07658137_1e65_459c_b55d_3548a2210b30.slice/crio-bba7de732a6faabd98d1de172e45b408e28b0bbe2fac0aee6d655cfcf4b2ac3e WatchSource:0}: Error finding container bba7de732a6faabd98d1de172e45b408e28b0bbe2fac0aee6d655cfcf4b2ac3e: Status 404 returned error can't find the container with id bba7de732a6faabd98d1de172e45b408e28b0bbe2fac0aee6d655cfcf4b2ac3e
Nov 25 09:08:31 crc kubenswrapper[4932]: W1125 09:08:31.791651 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeec87a7e_5df8_47d8_8584_81d832f34a02.slice/crio-caec1dd24f8ac27f408e3a7bf8127f68cc3080fab41dd7e4dc14167a7bdae73e WatchSource:0}: Error finding container caec1dd24f8ac27f408e3a7bf8127f68cc3080fab41dd7e4dc14167a7bdae73e: Status 404 returned error can't find the container with id caec1dd24f8ac27f408e3a7bf8127f68cc3080fab41dd7e4dc14167a7bdae73e
Nov 25 09:08:31 crc kubenswrapper[4932]: I1125 09:08:31.873038 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc"
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.041241 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhqlb\" (UniqueName: \"kubernetes.io/projected/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-kube-api-access-zhqlb\") pod \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") "
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.041595 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-ovsdbserver-sb\") pod \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") "
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.041672 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-config\") pod \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") "
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.041697 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-dns-svc\") pod \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") "
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.041766 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-ovsdbserver-nb\") pod \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\" (UID: \"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0\") "
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.048416 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-kube-api-access-zhqlb" (OuterVolumeSpecName: "kube-api-access-zhqlb") pod "618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" (UID: "618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0"). InnerVolumeSpecName "kube-api-access-zhqlb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.087534 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-config" (OuterVolumeSpecName: "config") pod "618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" (UID: "618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.102637 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" (UID: "618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.103091 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" (UID: "618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.107903 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" (UID: "618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.145003 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.145033 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.145042 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.145054 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhqlb\" (UniqueName: \"kubernetes.io/projected/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-kube-api-access-zhqlb\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.145064 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.290900 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"eec87a7e-5df8-47d8-8584-81d832f34a02","Type":"ContainerStarted","Data":"caec1dd24f8ac27f408e3a7bf8127f68cc3080fab41dd7e4dc14167a7bdae73e"}
Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.293682 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc"
Need to start a new one" pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.293712 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7d985fb7-srqqc" event={"ID":"618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0","Type":"ContainerDied","Data":"a387dd1dac98b7b2485061e6163e5d75dec19fd947ba58eaa114fa5bb79d0a11"} Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.293775 4932 scope.go:117] "RemoveContainer" containerID="18d0f9e0750606b9f3aac543160d933a446a6cb4679b227dc642ecb5a707dd9f" Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.296532 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07658137-1e65-459c-b55d-3548a2210b30","Type":"ContainerStarted","Data":"bba7de732a6faabd98d1de172e45b408e28b0bbe2fac0aee6d655cfcf4b2ac3e"} Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.386820 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7d985fb7-srqqc"] Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.406967 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7d985fb7-srqqc"] Nov 25 09:08:32 crc kubenswrapper[4932]: I1125 09:08:32.622083 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" path="/var/lib/kubelet/pods/618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0/volumes" Nov 25 09:08:34 crc kubenswrapper[4932]: I1125 09:08:34.675499 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:08:34 crc kubenswrapper[4932]: I1125 09:08:34.757074 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:08:37 crc kubenswrapper[4932]: I1125 09:08:37.857506 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cf79d967-28wbk" Nov 25 09:08:37 crc kubenswrapper[4932]: I1125 09:08:37.924707 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-2dmrz"] Nov 25 09:08:37 crc kubenswrapper[4932]: I1125 09:08:37.925161 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" podUID="db4b005b-e018-4162-8056-be07edd72b71" containerName="dnsmasq-dns" containerID="cri-o://aaeed985b6e5d9b80c261c6498b86f540356061013423e09033041bfc2e3cad0" gracePeriod=10 Nov 25 09:08:39 crc kubenswrapper[4932]: I1125 09:08:39.376485 4932 generic.go:334] "Generic (PLEG): container finished" podID="db4b005b-e018-4162-8056-be07edd72b71" containerID="aaeed985b6e5d9b80c261c6498b86f540356061013423e09033041bfc2e3cad0" exitCode=0 Nov 25 09:08:39 crc kubenswrapper[4932]: I1125 09:08:39.376572 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" event={"ID":"db4b005b-e018-4162-8056-be07edd72b71","Type":"ContainerDied","Data":"aaeed985b6e5d9b80c261c6498b86f540356061013423e09033041bfc2e3cad0"} Nov 25 09:08:41 crc kubenswrapper[4932]: I1125 09:08:41.209671 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" podUID="db4b005b-e018-4162-8056-be07edd72b71" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.118:5353: connect: connection refused" Nov 25 09:08:41 crc kubenswrapper[4932]: I1125 09:08:41.724831 4932 scope.go:117] "RemoveContainer" 
containerID="d1d4d6f0c7be1eb38e98b5f5873ba1f5f79feea71dc805b1a7197dad3e8b9719" Nov 25 09:08:42 crc kubenswrapper[4932]: E1125 09:08:42.108631 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:4c93a5cccb9971e24f05daf93b3aa11ba71752bc3469a1a1a2c4906f92f69645" Nov 25 09:08:42 crc kubenswrapper[4932]: E1125 09:08:42.108787 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:4c93a5cccb9971e24f05daf93b3aa11ba71752bc3469a1a1a2c4906f92f69645,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-srp5z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-p579r_openstack(0f0aa1af-46c3-4583-9140-149dddf9b048): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:08:42 crc kubenswrapper[4932]: E1125 09:08:42.109955 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-p579r" podUID="0f0aa1af-46c3-4583-9140-149dddf9b048" Nov 25 09:08:42 crc kubenswrapper[4932]: E1125 09:08:42.406939 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:4c93a5cccb9971e24f05daf93b3aa11ba71752bc3469a1a1a2c4906f92f69645\\\"\"" pod="openstack/barbican-db-sync-p579r" podUID="0f0aa1af-46c3-4583-9140-149dddf9b048" Nov 25 09:08:42 crc kubenswrapper[4932]: E1125 09:08:42.474333 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140" Nov 25 09:08:42 crc kubenswrapper[4932]: E1125 09:08:42.474500 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n565h6ch5b5h549h685h597hd8h677h69hd4h5cch644hfh66bh58h568hdh574h79h684h7h54ch78h5dfh654h74h97h5f9hf5h66h598h587q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bfwws,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9bb5dd5d-4c94-434e-880d-f47d84a21724): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:08:44 crc kubenswrapper[4932]: E1125 09:08:44.038795 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api@sha256:7dd2e0dbb6bb5a6cecd1763e43479ca8cb6a0c502534e83c8795c0da2b50e099" Nov 25 09:08:44 crc kubenswrapper[4932]: E1125 09:08:44.039251 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api@sha256:7dd2e0dbb6bb5a6cecd1763e43479ca8cb6a0c502534e83c8795c0da2b50e099,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jpmkk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-gqn2r_openstack(d4a545d2-ff3c-4a27-b210-4803cdbf3c86): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:08:44 crc kubenswrapper[4932]: E1125 09:08:44.040423 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-gqn2r" podUID="d4a545d2-ff3c-4a27-b210-4803cdbf3c86" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.079999 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.090224 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rtq9\" (UniqueName: \"kubernetes.io/projected/5ab5e673-3e6f-4980-bec0-497b78924861-kube-api-access-8rtq9\") pod \"5ab5e673-3e6f-4980-bec0-497b78924861\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.090621 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-combined-ca-bundle\") pod \"5ab5e673-3e6f-4980-bec0-497b78924861\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.090867 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-credential-keys\") pod \"5ab5e673-3e6f-4980-bec0-497b78924861\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.090928 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-scripts\") pod \"5ab5e673-3e6f-4980-bec0-497b78924861\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.091043 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-fernet-keys\") pod \"5ab5e673-3e6f-4980-bec0-497b78924861\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.091097 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-config-data\") pod \"5ab5e673-3e6f-4980-bec0-497b78924861\" (UID: \"5ab5e673-3e6f-4980-bec0-497b78924861\") " Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.097589 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5ab5e673-3e6f-4980-bec0-497b78924861" (UID: "5ab5e673-3e6f-4980-bec0-497b78924861"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.097673 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ab5e673-3e6f-4980-bec0-497b78924861-kube-api-access-8rtq9" (OuterVolumeSpecName: "kube-api-access-8rtq9") pod "5ab5e673-3e6f-4980-bec0-497b78924861" (UID: "5ab5e673-3e6f-4980-bec0-497b78924861"). InnerVolumeSpecName "kube-api-access-8rtq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.099678 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "5ab5e673-3e6f-4980-bec0-497b78924861" (UID: "5ab5e673-3e6f-4980-bec0-497b78924861"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.101847 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-scripts" (OuterVolumeSpecName: "scripts") pod "5ab5e673-3e6f-4980-bec0-497b78924861" (UID: "5ab5e673-3e6f-4980-bec0-497b78924861"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.120636 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-config-data" (OuterVolumeSpecName: "config-data") pod "5ab5e673-3e6f-4980-bec0-497b78924861" (UID: "5ab5e673-3e6f-4980-bec0-497b78924861"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.133173 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ab5e673-3e6f-4980-bec0-497b78924861" (UID: "5ab5e673-3e6f-4980-bec0-497b78924861"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.195728 4932 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.195766 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.195780 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rtq9\" (UniqueName: \"kubernetes.io/projected/5ab5e673-3e6f-4980-bec0-497b78924861-kube-api-access-8rtq9\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.195793 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.195806 4932 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.195816 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ab5e673-3e6f-4980-bec0-497b78924861-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.424432 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-k7fzg" Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.424580 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-k7fzg" event={"ID":"5ab5e673-3e6f-4980-bec0-497b78924861","Type":"ContainerDied","Data":"367fa860c314ec9a105e47d41dfadf85968ca9a1566acc00e280c22b09109c8c"} Nov 25 09:08:44 crc kubenswrapper[4932]: I1125 09:08:44.424622 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="367fa860c314ec9a105e47d41dfadf85968ca9a1566acc00e280c22b09109c8c" Nov 25 09:08:44 crc kubenswrapper[4932]: E1125 09:08:44.426883 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api@sha256:7dd2e0dbb6bb5a6cecd1763e43479ca8cb6a0c502534e83c8795c0da2b50e099\\\"\"" pod="openstack/placement-db-sync-gqn2r" podUID="d4a545d2-ff3c-4a27-b210-4803cdbf3c86" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.158413 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-k7fzg"] Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.165850 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-k7fzg"] Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.258874 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-xlpr4"] Nov 25 09:08:45 crc kubenswrapper[4932]: E1125 09:08:45.259983 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" containerName="init" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.260002 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" containerName="init" Nov 25 09:08:45 crc kubenswrapper[4932]: E1125 09:08:45.260019 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ab5e673-3e6f-4980-bec0-497b78924861" containerName="keystone-bootstrap" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.260025 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ab5e673-3e6f-4980-bec0-497b78924861" containerName="keystone-bootstrap" Nov 25 09:08:45 crc kubenswrapper[4932]: E1125 09:08:45.260048 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" containerName="dnsmasq-dns" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.260054 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" containerName="dnsmasq-dns" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.260672 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="618f7c4b-5a2d-4eea-ab0e-e8d9aacc95e0" containerName="dnsmasq-dns" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.260716 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ab5e673-3e6f-4980-bec0-497b78924861" containerName="keystone-bootstrap" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.262046 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.271814 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-rgvcq" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.272108 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.272341 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.272981 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.273247 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.273604 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-xlpr4"] Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.321315 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntbg5\" (UniqueName: \"kubernetes.io/projected/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-kube-api-access-ntbg5\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.321636 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-combined-ca-bundle\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.321692 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-config-data\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.321737 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-credential-keys\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.321757 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-fernet-keys\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.321842 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-scripts\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.432458 4932 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-config-data\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.432528 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-credential-keys\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.432566 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-fernet-keys\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.432613 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-scripts\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.432711 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntbg5\" (UniqueName: \"kubernetes.io/projected/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-kube-api-access-ntbg5\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.432736 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-combined-ca-bundle\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.719145 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-fernet-keys\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.720123 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-config-data\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.737696 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-combined-ca-bundle\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.957639 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-credential-keys\") pod \"keystone-bootstrap-xlpr4\" (UID: 
\"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.959135 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-scripts\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:45 crc kubenswrapper[4932]: I1125 09:08:45.971000 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntbg5\" (UniqueName: \"kubernetes.io/projected/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-kube-api-access-ntbg5\") pod \"keystone-bootstrap-xlpr4\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:46 crc kubenswrapper[4932]: I1125 09:08:46.012895 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:08:46 crc kubenswrapper[4932]: I1125 09:08:46.617282 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ab5e673-3e6f-4980-bec0-497b78924861" path="/var/lib/kubelet/pods/5ab5e673-3e6f-4980-bec0-497b78924861/volumes" Nov 25 09:08:51 crc kubenswrapper[4932]: I1125 09:08:51.210404 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" podUID="db4b005b-e018-4162-8056-be07edd72b71" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.118:5353: i/o timeout" Nov 25 09:08:54 crc kubenswrapper[4932]: E1125 09:08:54.359547 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75" Nov 25 09:08:54 crc kubenswrapper[4932]: E1125 09:08:54.360096 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:container-server,Image:quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75,Command:[/usr/bin/swift-container-server /etc/swift/container-server.conf.d 
-v],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:container,HostPort:0,ContainerPort:6201,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5b7h56h9dh94h67bh697h95h55hbh555h556h675h5fdh57dh579h5fbh64fh5c9h687hb6h678h5d4h549h54h98h8ch564h5bh5bch55dhc8hf8q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:swift,ReadOnly:false,MountPath:/srv/node/pv,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-swift,ReadOnly:false,MountPath:/etc/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cache,ReadOnly:false,MountPath:/var/cache/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:lock,ReadOnly:false,MountPath:/var/lock,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-skkg2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42445,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-storage-0_openstack(81ccee4a-f414-4007-ae17-b440b55dea5f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:08:54 crc kubenswrapper[4932]: E1125 09:08:54.386347 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"container-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\"]" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.455625 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.525518 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-config\") pod \"db4b005b-e018-4162-8056-be07edd72b71\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.525634 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhcgf\" (UniqueName: \"kubernetes.io/projected/db4b005b-e018-4162-8056-be07edd72b71-kube-api-access-bhcgf\") pod \"db4b005b-e018-4162-8056-be07edd72b71\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.525669 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-ovsdbserver-nb\") pod \"db4b005b-e018-4162-8056-be07edd72b71\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.525687 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-dns-svc\") pod \"db4b005b-e018-4162-8056-be07edd72b71\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.525707 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-ovsdbserver-sb\") pod \"db4b005b-e018-4162-8056-be07edd72b71\" (UID: \"db4b005b-e018-4162-8056-be07edd72b71\") " Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.534685 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db4b005b-e018-4162-8056-be07edd72b71-kube-api-access-bhcgf" (OuterVolumeSpecName: "kube-api-access-bhcgf") pod "db4b005b-e018-4162-8056-be07edd72b71" (UID: "db4b005b-e018-4162-8056-be07edd72b71"). InnerVolumeSpecName "kube-api-access-bhcgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.553153 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" event={"ID":"db4b005b-e018-4162-8056-be07edd72b71","Type":"ContainerDied","Data":"0898c8cb2aa48c00a35820ceb0c0645e06fba6cea078c9b37afde0370ffd71b9"} Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.553221 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.553227 4932 scope.go:117] "RemoveContainer" containerID="aaeed985b6e5d9b80c261c6498b86f540356061013423e09033041bfc2e3cad0" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.570075 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-config" (OuterVolumeSpecName: "config") pod "db4b005b-e018-4162-8056-be07edd72b71" (UID: "db4b005b-e018-4162-8056-be07edd72b71"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.572608 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "db4b005b-e018-4162-8056-be07edd72b71" (UID: "db4b005b-e018-4162-8056-be07edd72b71"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.575573 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "db4b005b-e018-4162-8056-be07edd72b71" (UID: "db4b005b-e018-4162-8056-be07edd72b71"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.575648 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "db4b005b-e018-4162-8056-be07edd72b71" (UID: "db4b005b-e018-4162-8056-be07edd72b71"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.627352 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhcgf\" (UniqueName: \"kubernetes.io/projected/db4b005b-e018-4162-8056-be07edd72b71-kube-api-access-bhcgf\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.627393 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.627498 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.627509 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.627517 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db4b005b-e018-4162-8056-be07edd72b71-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.874769 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-2dmrz"] Nov 25 09:08:54 crc kubenswrapper[4932]: I1125 09:08:54.881055 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-2dmrz"] Nov 25 09:08:55 crc kubenswrapper[4932]: I1125 09:08:55.547050 4932 scope.go:117] "RemoveContainer" containerID="cbef0b7a9f85a74a3a714541cd7021cd8b427005067e42bb833f83655ebddb19" Nov 25 09:08:55 crc kubenswrapper[4932]: E1125 09:08:55.556928 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879" Nov 25 09:08:55 crc 
kubenswrapper[4932]: E1125 09:08:55.557075 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tljhf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-lc5vk_openstack(e14c1b6a-a83b-47fc-8fac-36468c1b4df5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:08:55 crc kubenswrapper[4932]: E1125 09:08:55.559757 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-lc5vk" podUID="e14c1b6a-a83b-47fc-8fac-36468c1b4df5" Nov 25 09:08:55 crc kubenswrapper[4932]: E1125 09:08:55.853061 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-notification@sha256:aa1d3aaf6b394621ed4089a98e0a82b763f467e8b5c5db772f9fdf99fc86e333" Nov 25 09:08:55 crc kubenswrapper[4932]: E1125 09:08:55.853491 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:ceilometer-notification-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-notification@sha256:aa1d3aaf6b394621ed4089a98e0a82b763f467e8b5c5db772f9fdf99fc86e333,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n565h6ch5b5h549h685h597hd8h677h69hd4h5cch644hfh66bh58h568hdh574h79h684h7h54ch78h5dfh654h74h97h5f9hf5h66h598h587q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-notification-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bfwws,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/notificationhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9bb5dd5d-4c94-434e-880d-f47d84a21724): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 09:08:56 crc kubenswrapper[4932]: I1125 09:08:56.211179 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cf8bcbfcf-2dmrz" podUID="db4b005b-e018-4162-8056-be07edd72b71" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.118:5353: i/o timeout" Nov 25 09:08:56 crc kubenswrapper[4932]: I1125 09:08:56.302269 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-xlpr4"] Nov 25 09:08:56 crc kubenswrapper[4932]: W1125 09:08:56.304026 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7537a42d_2ff4_48ed_b6c4_3efc948e72ab.slice/crio-1950783652ed81e0e2f23585b3be7fbc564176816b531c124cdd2cd0a21daf33 WatchSource:0}: Error finding container 1950783652ed81e0e2f23585b3be7fbc564176816b531c124cdd2cd0a21daf33: Status 404 returned error can't find the container with id 
1950783652ed81e0e2f23585b3be7fbc564176816b531c124cdd2cd0a21daf33 Nov 25 09:08:56 crc kubenswrapper[4932]: I1125 09:08:56.575865 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xlpr4" event={"ID":"7537a42d-2ff4-48ed-b6c4-3efc948e72ab","Type":"ContainerStarted","Data":"1950783652ed81e0e2f23585b3be7fbc564176816b531c124cdd2cd0a21daf33"} Nov 25 09:08:56 crc kubenswrapper[4932]: E1125 09:08:56.581149 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879\\\"\"" pod="openstack/cinder-db-sync-lc5vk" podUID="e14c1b6a-a83b-47fc-8fac-36468c1b4df5" Nov 25 09:08:56 crc kubenswrapper[4932]: I1125 09:08:56.622305 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db4b005b-e018-4162-8056-be07edd72b71" path="/var/lib/kubelet/pods/db4b005b-e018-4162-8056-be07edd72b71/volumes" Nov 25 09:08:57 crc kubenswrapper[4932]: I1125 09:08:57.598274 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="eec87a7e-5df8-47d8-8584-81d832f34a02" containerName="glance-log" containerID="cri-o://d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd" gracePeriod=30 Nov 25 09:08:57 crc kubenswrapper[4932]: I1125 09:08:57.598869 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"eec87a7e-5df8-47d8-8584-81d832f34a02","Type":"ContainerStarted","Data":"ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301"} Nov 25 09:08:57 crc kubenswrapper[4932]: I1125 09:08:57.598892 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"eec87a7e-5df8-47d8-8584-81d832f34a02","Type":"ContainerStarted","Data":"d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd"} Nov 25 09:08:57 crc kubenswrapper[4932]: I1125 09:08:57.599120 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="eec87a7e-5df8-47d8-8584-81d832f34a02" containerName="glance-httpd" containerID="cri-o://ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301" gracePeriod=30 Nov 25 09:08:57 crc kubenswrapper[4932]: I1125 09:08:57.623086 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07658137-1e65-459c-b55d-3548a2210b30","Type":"ContainerStarted","Data":"3b5911e36ca8c2d0426bec1a4c8ddaa7ba763f8df5bd7bb8d64fc12c2006d4f4"} Nov 25 09:08:57 crc kubenswrapper[4932]: I1125 09:08:57.623131 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07658137-1e65-459c-b55d-3548a2210b30","Type":"ContainerStarted","Data":"3d63b7f3e59acd8d18c2a77be92bf8ce0d8b19d274262806a9c3824def2ee158"} Nov 25 09:08:57 crc kubenswrapper[4932]: I1125 09:08:57.623229 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="07658137-1e65-459c-b55d-3548a2210b30" containerName="glance-httpd" containerID="cri-o://3b5911e36ca8c2d0426bec1a4c8ddaa7ba763f8df5bd7bb8d64fc12c2006d4f4" gracePeriod=30 Nov 25 09:08:57 crc kubenswrapper[4932]: I1125 09:08:57.623231 4932 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/glance-default-external-api-0" podUID="07658137-1e65-459c-b55d-3548a2210b30" containerName="glance-log" containerID="cri-o://3d63b7f3e59acd8d18c2a77be92bf8ce0d8b19d274262806a9c3824def2ee158" gracePeriod=30 Nov 25 09:08:57 crc kubenswrapper[4932]: I1125 09:08:57.631340 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xlpr4" event={"ID":"7537a42d-2ff4-48ed-b6c4-3efc948e72ab","Type":"ContainerStarted","Data":"35b97667ea2f3c8f4d0ed84ea1858ea883985abf6696778a33a9ecd4a8814409"} Nov 25 09:08:57 crc kubenswrapper[4932]: I1125 09:08:57.649724 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=30.649705029 podStartE2EDuration="30.649705029s" podCreationTimestamp="2025-11-25 09:08:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:08:57.645221661 +0000 UTC m=+1197.771251224" watchObservedRunningTime="2025-11-25 09:08:57.649705029 +0000 UTC m=+1197.775734592" Nov 25 09:08:57 crc kubenswrapper[4932]: I1125 09:08:57.712726 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-xlpr4" podStartSLOduration=12.712700988 podStartE2EDuration="12.712700988s" podCreationTimestamp="2025-11-25 09:08:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:08:57.673101741 +0000 UTC m=+1197.799131304" watchObservedRunningTime="2025-11-25 09:08:57.712700988 +0000 UTC m=+1197.838730551" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.203141 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.230857 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=31.230840174 podStartE2EDuration="31.230840174s" podCreationTimestamp="2025-11-25 09:08:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:08:57.705754078 +0000 UTC m=+1197.831783641" watchObservedRunningTime="2025-11-25 09:08:58.230840174 +0000 UTC m=+1198.356869737" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.332437 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-config-data\") pod \"eec87a7e-5df8-47d8-8584-81d832f34a02\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.332820 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"eec87a7e-5df8-47d8-8584-81d832f34a02\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.332961 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eec87a7e-5df8-47d8-8584-81d832f34a02-logs\") pod \"eec87a7e-5df8-47d8-8584-81d832f34a02\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.332993 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-combined-ca-bundle\") pod \"eec87a7e-5df8-47d8-8584-81d832f34a02\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.333076 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-scripts\") pod \"eec87a7e-5df8-47d8-8584-81d832f34a02\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.333127 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/eec87a7e-5df8-47d8-8584-81d832f34a02-httpd-run\") pod \"eec87a7e-5df8-47d8-8584-81d832f34a02\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.333159 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hm4tr\" (UniqueName: \"kubernetes.io/projected/eec87a7e-5df8-47d8-8584-81d832f34a02-kube-api-access-hm4tr\") pod \"eec87a7e-5df8-47d8-8584-81d832f34a02\" (UID: \"eec87a7e-5df8-47d8-8584-81d832f34a02\") " Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.333494 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eec87a7e-5df8-47d8-8584-81d832f34a02-logs" (OuterVolumeSpecName: "logs") pod "eec87a7e-5df8-47d8-8584-81d832f34a02" (UID: "eec87a7e-5df8-47d8-8584-81d832f34a02"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.333762 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eec87a7e-5df8-47d8-8584-81d832f34a02-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.333782 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eec87a7e-5df8-47d8-8584-81d832f34a02-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "eec87a7e-5df8-47d8-8584-81d832f34a02" (UID: "eec87a7e-5df8-47d8-8584-81d832f34a02"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.337934 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-scripts" (OuterVolumeSpecName: "scripts") pod "eec87a7e-5df8-47d8-8584-81d832f34a02" (UID: "eec87a7e-5df8-47d8-8584-81d832f34a02"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.337981 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eec87a7e-5df8-47d8-8584-81d832f34a02-kube-api-access-hm4tr" (OuterVolumeSpecName: "kube-api-access-hm4tr") pod "eec87a7e-5df8-47d8-8584-81d832f34a02" (UID: "eec87a7e-5df8-47d8-8584-81d832f34a02"). InnerVolumeSpecName "kube-api-access-hm4tr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.339931 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "eec87a7e-5df8-47d8-8584-81d832f34a02" (UID: "eec87a7e-5df8-47d8-8584-81d832f34a02"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.356938 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eec87a7e-5df8-47d8-8584-81d832f34a02" (UID: "eec87a7e-5df8-47d8-8584-81d832f34a02"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.379707 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-config-data" (OuterVolumeSpecName: "config-data") pod "eec87a7e-5df8-47d8-8584-81d832f34a02" (UID: "eec87a7e-5df8-47d8-8584-81d832f34a02"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.434971 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.435014 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.435025 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/eec87a7e-5df8-47d8-8584-81d832f34a02-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.435037 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hm4tr\" (UniqueName: \"kubernetes.io/projected/eec87a7e-5df8-47d8-8584-81d832f34a02-kube-api-access-hm4tr\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.435049 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eec87a7e-5df8-47d8-8584-81d832f34a02-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.435086 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.457133 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.536818 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.646281 4932 generic.go:334] "Generic (PLEG): container finished" podID="eec87a7e-5df8-47d8-8584-81d832f34a02" containerID="ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301" exitCode=143 Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.646316 4932 generic.go:334] "Generic (PLEG): container finished" podID="eec87a7e-5df8-47d8-8584-81d832f34a02" containerID="d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd" exitCode=143 Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.646351 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.646368 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"eec87a7e-5df8-47d8-8584-81d832f34a02","Type":"ContainerDied","Data":"ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301"} Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.646395 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"eec87a7e-5df8-47d8-8584-81d832f34a02","Type":"ContainerDied","Data":"d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd"} Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.646404 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"eec87a7e-5df8-47d8-8584-81d832f34a02","Type":"ContainerDied","Data":"caec1dd24f8ac27f408e3a7bf8127f68cc3080fab41dd7e4dc14167a7bdae73e"} Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.646419 4932 scope.go:117] "RemoveContainer" containerID="ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.649597 4932 generic.go:334] "Generic (PLEG): container finished" podID="07658137-1e65-459c-b55d-3548a2210b30" containerID="3b5911e36ca8c2d0426bec1a4c8ddaa7ba763f8df5bd7bb8d64fc12c2006d4f4" exitCode=143 Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.650403 4932 generic.go:334] "Generic (PLEG): container finished" podID="07658137-1e65-459c-b55d-3548a2210b30" containerID="3d63b7f3e59acd8d18c2a77be92bf8ce0d8b19d274262806a9c3824def2ee158" exitCode=143 Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.650870 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07658137-1e65-459c-b55d-3548a2210b30","Type":"ContainerDied","Data":"3b5911e36ca8c2d0426bec1a4c8ddaa7ba763f8df5bd7bb8d64fc12c2006d4f4"} Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.650951 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07658137-1e65-459c-b55d-3548a2210b30","Type":"ContainerDied","Data":"3d63b7f3e59acd8d18c2a77be92bf8ce0d8b19d274262806a9c3824def2ee158"} Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.653566 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-p579r" event={"ID":"0f0aa1af-46c3-4583-9140-149dddf9b048","Type":"ContainerStarted","Data":"44c20a6528a98cf85e7eddf8dc1dcf7d3891a50d73c5bee0ebfb78f28694c629"} Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.678021 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.696155 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.698890 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-p579r" podStartSLOduration=2.575769394 podStartE2EDuration="34.698873441s" podCreationTimestamp="2025-11-25 09:08:24 +0000 UTC" firstStartedPulling="2025-11-25 09:08:25.751236002 +0000 UTC m=+1165.877265565" lastFinishedPulling="2025-11-25 09:08:57.874340049 +0000 UTC m=+1198.000369612" observedRunningTime="2025-11-25 09:08:58.681154853 +0000 UTC m=+1198.807184416" watchObservedRunningTime="2025-11-25 
09:08:58.698873441 +0000 UTC m=+1198.824903004" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.716338 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:08:58 crc kubenswrapper[4932]: E1125 09:08:58.717116 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db4b005b-e018-4162-8056-be07edd72b71" containerName="dnsmasq-dns" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.717140 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="db4b005b-e018-4162-8056-be07edd72b71" containerName="dnsmasq-dns" Nov 25 09:08:58 crc kubenswrapper[4932]: E1125 09:08:58.717156 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eec87a7e-5df8-47d8-8584-81d832f34a02" containerName="glance-log" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.717162 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="eec87a7e-5df8-47d8-8584-81d832f34a02" containerName="glance-log" Nov 25 09:08:58 crc kubenswrapper[4932]: E1125 09:08:58.717203 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db4b005b-e018-4162-8056-be07edd72b71" containerName="init" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.717211 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="db4b005b-e018-4162-8056-be07edd72b71" containerName="init" Nov 25 09:08:58 crc kubenswrapper[4932]: E1125 09:08:58.717229 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eec87a7e-5df8-47d8-8584-81d832f34a02" containerName="glance-httpd" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.717237 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="eec87a7e-5df8-47d8-8584-81d832f34a02" containerName="glance-httpd" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.717396 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="eec87a7e-5df8-47d8-8584-81d832f34a02" containerName="glance-log" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.717418 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="db4b005b-e018-4162-8056-be07edd72b71" containerName="dnsmasq-dns" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.717426 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="eec87a7e-5df8-47d8-8584-81d832f34a02" containerName="glance-httpd" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.718241 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.721295 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.721305 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.725432 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.844577 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.844625 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xn2nf\" (UniqueName: \"kubernetes.io/projected/3b55fab6-584e-4098-beb0-be91c10e631f-kube-api-access-xn2nf\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.844725 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.844743 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.844762 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.844804 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.844850 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b55fab6-584e-4098-beb0-be91c10e631f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.844878 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b55fab6-584e-4098-beb0-be91c10e631f-logs\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.946667 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.946738 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b55fab6-584e-4098-beb0-be91c10e631f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.946768 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b55fab6-584e-4098-beb0-be91c10e631f-logs\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.946826 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.946848 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xn2nf\" (UniqueName: \"kubernetes.io/projected/3b55fab6-584e-4098-beb0-be91c10e631f-kube-api-access-xn2nf\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.946873 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.946886 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.946904 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.947784 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.948450 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b55fab6-584e-4098-beb0-be91c10e631f-logs\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.948766 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b55fab6-584e-4098-beb0-be91c10e631f-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.955819 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.955963 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-scripts\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.956488 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.957120 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-config-data\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.966125 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xn2nf\" (UniqueName: \"kubernetes.io/projected/3b55fab6-584e-4098-beb0-be91c10e631f-kube-api-access-xn2nf\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:58 crc kubenswrapper[4932]: I1125 09:08:58.976680 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:08:59 crc kubenswrapper[4932]: I1125 09:08:59.019843 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 09:08:59 crc kubenswrapper[4932]: I1125 09:08:59.020258 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/glance-default-external-api-0" Nov 25 09:08:59 crc kubenswrapper[4932]: I1125 09:08:59.060807 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.071763 4932 scope.go:117] "RemoveContainer" containerID="d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.138541 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.274693 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mfkv\" (UniqueName: \"kubernetes.io/projected/07658137-1e65-459c-b55d-3548a2210b30-kube-api-access-9mfkv\") pod \"07658137-1e65-459c-b55d-3548a2210b30\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.274755 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-config-data\") pod \"07658137-1e65-459c-b55d-3548a2210b30\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.274807 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07658137-1e65-459c-b55d-3548a2210b30-logs\") pod \"07658137-1e65-459c-b55d-3548a2210b30\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.274830 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07658137-1e65-459c-b55d-3548a2210b30-httpd-run\") pod \"07658137-1e65-459c-b55d-3548a2210b30\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.274894 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-scripts\") pod \"07658137-1e65-459c-b55d-3548a2210b30\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.274956 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"07658137-1e65-459c-b55d-3548a2210b30\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.275013 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-combined-ca-bundle\") pod \"07658137-1e65-459c-b55d-3548a2210b30\" (UID: \"07658137-1e65-459c-b55d-3548a2210b30\") " Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.275304 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07658137-1e65-459c-b55d-3548a2210b30-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "07658137-1e65-459c-b55d-3548a2210b30" (UID: "07658137-1e65-459c-b55d-3548a2210b30"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.275420 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/07658137-1e65-459c-b55d-3548a2210b30-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.276257 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07658137-1e65-459c-b55d-3548a2210b30-logs" (OuterVolumeSpecName: "logs") pod "07658137-1e65-459c-b55d-3548a2210b30" (UID: "07658137-1e65-459c-b55d-3548a2210b30"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.281067 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-scripts" (OuterVolumeSpecName: "scripts") pod "07658137-1e65-459c-b55d-3548a2210b30" (UID: "07658137-1e65-459c-b55d-3548a2210b30"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.285438 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "07658137-1e65-459c-b55d-3548a2210b30" (UID: "07658137-1e65-459c-b55d-3548a2210b30"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.295564 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07658137-1e65-459c-b55d-3548a2210b30-kube-api-access-9mfkv" (OuterVolumeSpecName: "kube-api-access-9mfkv") pod "07658137-1e65-459c-b55d-3548a2210b30" (UID: "07658137-1e65-459c-b55d-3548a2210b30"). InnerVolumeSpecName "kube-api-access-9mfkv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.311373 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "07658137-1e65-459c-b55d-3548a2210b30" (UID: "07658137-1e65-459c-b55d-3548a2210b30"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.326327 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-config-data" (OuterVolumeSpecName: "config-data") pod "07658137-1e65-459c-b55d-3548a2210b30" (UID: "07658137-1e65-459c-b55d-3548a2210b30"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.376972 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.377647 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.377685 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mfkv\" (UniqueName: \"kubernetes.io/projected/07658137-1e65-459c-b55d-3548a2210b30-kube-api-access-9mfkv\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.377700 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.377711 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07658137-1e65-459c-b55d-3548a2210b30-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.377720 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/07658137-1e65-459c-b55d-3548a2210b30-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.396367 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.479735 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.619747 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eec87a7e-5df8-47d8-8584-81d832f34a02" path="/var/lib/kubelet/pods/eec87a7e-5df8-47d8-8584-81d832f34a02/volumes" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.671770 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"07658137-1e65-459c-b55d-3548a2210b30","Type":"ContainerDied","Data":"bba7de732a6faabd98d1de172e45b408e28b0bbe2fac0aee6d655cfcf4b2ac3e"} Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.671795 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.674931 4932 generic.go:334] "Generic (PLEG): container finished" podID="7537a42d-2ff4-48ed-b6c4-3efc948e72ab" containerID="35b97667ea2f3c8f4d0ed84ea1858ea883985abf6696778a33a9ecd4a8814409" exitCode=0 Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.674983 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xlpr4" event={"ID":"7537a42d-2ff4-48ed-b6c4-3efc948e72ab","Type":"ContainerDied","Data":"35b97667ea2f3c8f4d0ed84ea1858ea883985abf6696778a33a9ecd4a8814409"} Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.717299 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.729736 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.745649 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:09:00 crc kubenswrapper[4932]: E1125 09:09:00.746058 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07658137-1e65-459c-b55d-3548a2210b30" containerName="glance-httpd" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.746076 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="07658137-1e65-459c-b55d-3548a2210b30" containerName="glance-httpd" Nov 25 09:09:00 crc kubenswrapper[4932]: E1125 09:09:00.746091 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07658137-1e65-459c-b55d-3548a2210b30" containerName="glance-log" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.746099 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="07658137-1e65-459c-b55d-3548a2210b30" containerName="glance-log" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.746433 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="07658137-1e65-459c-b55d-3548a2210b30" containerName="glance-log" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.746457 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="07658137-1e65-459c-b55d-3548a2210b30" containerName="glance-httpd" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.747320 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.749499 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.749716 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.757302 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.886305 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.886421 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kh2xv\" (UniqueName: \"kubernetes.io/projected/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-kube-api-access-kh2xv\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.886462 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-logs\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.886520 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.886541 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-scripts\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.886565 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-config-data\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.886602 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.886632 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.988424 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kh2xv\" (UniqueName: \"kubernetes.io/projected/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-kube-api-access-kh2xv\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.988891 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-logs\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.988995 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.989021 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-scripts\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.989076 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-config-data\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.989139 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.989175 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.989316 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.989759 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.991247 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.995537 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-logs\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.995948 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-scripts\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.996878 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.997426 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:00 crc kubenswrapper[4932]: I1125 09:09:00.999148 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-config-data\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.007386 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kh2xv\" (UniqueName: \"kubernetes.io/projected/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-kube-api-access-kh2xv\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.020231 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " pod="openstack/glance-default-external-api-0" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.063760 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.532975 4932 scope.go:117] "RemoveContainer" containerID="ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301" Nov 25 09:09:01 crc kubenswrapper[4932]: E1125 09:09:01.533592 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301\": container with ID starting with ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301 not found: ID does not exist" containerID="ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.533626 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301"} err="failed to get container status \"ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301\": rpc error: code = NotFound desc = could not find container \"ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301\": container with ID starting with ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301 not found: ID does not exist" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.533653 4932 scope.go:117] "RemoveContainer" containerID="d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd" Nov 25 09:09:01 crc kubenswrapper[4932]: E1125 09:09:01.533919 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd\": container with ID starting with d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd not found: ID does not exist" containerID="d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.533939 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd"} err="failed to get container status \"d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd\": rpc error: code = NotFound desc = could not find container \"d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd\": container with ID starting with d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd not found: ID does not exist" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.533951 4932 scope.go:117] "RemoveContainer" containerID="ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.534214 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301"} err="failed to get container status \"ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301\": rpc error: code = NotFound desc = could not find container \"ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301\": container with ID starting with ce41d08139450cbad5bc782e9d3481ce2d5af2a7efc12738c7f1efb0f7012301 not found: ID does not exist" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.534241 4932 scope.go:117] "RemoveContainer" containerID="d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 
09:09:01.534501 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd"} err="failed to get container status \"d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd\": rpc error: code = NotFound desc = could not find container \"d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd\": container with ID starting with d6bc5b5efcc64fda68908a352c701bfedc3c99385e8d9f32505fad66509482dd not found: ID does not exist" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.534537 4932 scope.go:117] "RemoveContainer" containerID="3b5911e36ca8c2d0426bec1a4c8ddaa7ba763f8df5bd7bb8d64fc12c2006d4f4" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.696906 4932 scope.go:117] "RemoveContainer" containerID="3d63b7f3e59acd8d18c2a77be92bf8ce0d8b19d274262806a9c3824def2ee158" Nov 25 09:09:01 crc kubenswrapper[4932]: I1125 09:09:01.966973 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.015954 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-config-data\") pod \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.016083 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntbg5\" (UniqueName: \"kubernetes.io/projected/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-kube-api-access-ntbg5\") pod \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.016129 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-scripts\") pod \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.016209 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-fernet-keys\") pod \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.016273 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-combined-ca-bundle\") pod \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.016346 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-credential-keys\") pod \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\" (UID: \"7537a42d-2ff4-48ed-b6c4-3efc948e72ab\") " Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.023696 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-kube-api-access-ntbg5" (OuterVolumeSpecName: "kube-api-access-ntbg5") pod "7537a42d-2ff4-48ed-b6c4-3efc948e72ab" (UID: 
"7537a42d-2ff4-48ed-b6c4-3efc948e72ab"). InnerVolumeSpecName "kube-api-access-ntbg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.026629 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-scripts" (OuterVolumeSpecName: "scripts") pod "7537a42d-2ff4-48ed-b6c4-3efc948e72ab" (UID: "7537a42d-2ff4-48ed-b6c4-3efc948e72ab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.026684 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "7537a42d-2ff4-48ed-b6c4-3efc948e72ab" (UID: "7537a42d-2ff4-48ed-b6c4-3efc948e72ab"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.026702 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7537a42d-2ff4-48ed-b6c4-3efc948e72ab" (UID: "7537a42d-2ff4-48ed-b6c4-3efc948e72ab"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.061296 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-config-data" (OuterVolumeSpecName: "config-data") pod "7537a42d-2ff4-48ed-b6c4-3efc948e72ab" (UID: "7537a42d-2ff4-48ed-b6c4-3efc948e72ab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.079442 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7537a42d-2ff4-48ed-b6c4-3efc948e72ab" (UID: "7537a42d-2ff4-48ed-b6c4-3efc948e72ab"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.096621 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.118494 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntbg5\" (UniqueName: \"kubernetes.io/projected/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-kube-api-access-ntbg5\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.118526 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.118539 4932 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.118549 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.118557 4932 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.118567 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7537a42d-2ff4-48ed-b6c4-3efc948e72ab-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:02 crc kubenswrapper[4932]: W1125 09:09:02.197600 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod230dcb38_b2f4_45fa_91a9_46c11fc57e3d.slice/crio-90e3392ffc938fdbf1d276c76a105efda03fbd0a547fa3f55d4b60b7bee9c43e WatchSource:0}: Error finding container 90e3392ffc938fdbf1d276c76a105efda03fbd0a547fa3f55d4b60b7bee9c43e: Status 404 returned error can't find the container with id 90e3392ffc938fdbf1d276c76a105efda03fbd0a547fa3f55d4b60b7bee9c43e Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.200006 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.623027 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07658137-1e65-459c-b55d-3548a2210b30" path="/var/lib/kubelet/pods/07658137-1e65-459c-b55d-3548a2210b30/volumes" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.703473 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9bb5dd5d-4c94-434e-880d-f47d84a21724","Type":"ContainerStarted","Data":"81a8ed2afc7ba1b9a6e31c72b4fb9aefb28e71a15bd4787500868d40d4ede1ba"} Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.705393 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-xlpr4" event={"ID":"7537a42d-2ff4-48ed-b6c4-3efc948e72ab","Type":"ContainerDied","Data":"1950783652ed81e0e2f23585b3be7fbc564176816b531c124cdd2cd0a21daf33"} Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.705418 4932 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="1950783652ed81e0e2f23585b3be7fbc564176816b531c124cdd2cd0a21daf33" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.705482 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-xlpr4" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.709115 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"230dcb38-b2f4-45fa-91a9-46c11fc57e3d","Type":"ContainerStarted","Data":"35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6"} Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.709154 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"230dcb38-b2f4-45fa-91a9-46c11fc57e3d","Type":"ContainerStarted","Data":"90e3392ffc938fdbf1d276c76a105efda03fbd0a547fa3f55d4b60b7bee9c43e"} Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.718534 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gqn2r" event={"ID":"d4a545d2-ff3c-4a27-b210-4803cdbf3c86","Type":"ContainerStarted","Data":"e5ddbce93438e379c217b355c8b70d4d14eedbb1679bbeaf3ff04b9b953a64d0"} Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.735711 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3b55fab6-584e-4098-beb0-be91c10e631f","Type":"ContainerStarted","Data":"9819269c37912ac65e199eb5854e29f21125b062fca1aae014ea6ef312421df9"} Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.735746 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3b55fab6-584e-4098-beb0-be91c10e631f","Type":"ContainerStarted","Data":"971e9287542eb70fedae2132b2c0628d0f534e0db33279cf709e480140028bbc"} Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.745994 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-gqn2r" podStartSLOduration=3.059293114 podStartE2EDuration="38.745977594s" podCreationTimestamp="2025-11-25 09:08:24 +0000 UTC" firstStartedPulling="2025-11-25 09:08:26.059592662 +0000 UTC m=+1166.185622225" lastFinishedPulling="2025-11-25 09:09:01.746277142 +0000 UTC m=+1201.872306705" observedRunningTime="2025-11-25 09:09:02.739792816 +0000 UTC m=+1202.865822379" watchObservedRunningTime="2025-11-25 09:09:02.745977594 +0000 UTC m=+1202.872007157" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.882798 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6fb96c5d7c-tsdlh"] Nov 25 09:09:02 crc kubenswrapper[4932]: E1125 09:09:02.883250 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7537a42d-2ff4-48ed-b6c4-3efc948e72ab" containerName="keystone-bootstrap" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.883262 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7537a42d-2ff4-48ed-b6c4-3efc948e72ab" containerName="keystone-bootstrap" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.883478 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="7537a42d-2ff4-48ed-b6c4-3efc948e72ab" containerName="keystone-bootstrap" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.884009 4932 util.go:30] "No sandbox for pod can be found. 
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.884009 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6fb96c5d7c-tsdlh"
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.887932 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.888426 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-rgvcq"
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.888535 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.888645 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.888821 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc"
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.888918 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.896932 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6fb96c5d7c-tsdlh"]
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.932915 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-combined-ca-bundle\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh"
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.932973 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-internal-tls-certs\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh"
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.937321 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-credential-keys\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh"
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.937710 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-config-data\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh"
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.937749 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-public-tls-certs\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh"
Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.937895 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-scripts\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID:
\"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.937931 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-fernet-keys\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:02 crc kubenswrapper[4932]: I1125 09:09:02.938016 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcb97\" (UniqueName: \"kubernetes.io/projected/8153c48a-65e5-4525-b3ca-4dba83d94681-kube-api-access-rcb97\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.039437 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-credential-keys\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.039490 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-config-data\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.039510 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-public-tls-certs\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.039545 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-scripts\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.039563 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-fernet-keys\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.039588 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcb97\" (UniqueName: \"kubernetes.io/projected/8153c48a-65e5-4525-b3ca-4dba83d94681-kube-api-access-rcb97\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.039668 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-combined-ca-bundle\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " 
pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.039871 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-internal-tls-certs\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.046099 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-public-tls-certs\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.048033 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-combined-ca-bundle\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.048149 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-internal-tls-certs\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.049147 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-fernet-keys\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.049488 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-scripts\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.052135 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-credential-keys\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.052326 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-config-data\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.058039 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcb97\" (UniqueName: \"kubernetes.io/projected/8153c48a-65e5-4525-b3ca-4dba83d94681-kube-api-access-rcb97\") pod \"keystone-6fb96c5d7c-tsdlh\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.220119 4932 util.go:30] "No sandbox for pod can be found. 
Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.220119 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6fb96c5d7c-tsdlh"
Nov 25 09:09:03 crc kubenswrapper[4932]: W1125 09:09:03.663094 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8153c48a_65e5_4525_b3ca_4dba83d94681.slice/crio-c326432da8ac46fcc278b4e68d6bf8adbdc33df2c2102fab16bf09e99eeb7ab6 WatchSource:0}: Error finding container c326432da8ac46fcc278b4e68d6bf8adbdc33df2c2102fab16bf09e99eeb7ab6: Status 404 returned error can't find the container with id c326432da8ac46fcc278b4e68d6bf8adbdc33df2c2102fab16bf09e99eeb7ab6
Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.668086 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6fb96c5d7c-tsdlh"]
Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.762546 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3b55fab6-584e-4098-beb0-be91c10e631f","Type":"ContainerStarted","Data":"b2c7e77789303154985b5d572e2e645dc59426801021e02f4bfe4ed70e08243e"}
Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.766996 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6fb96c5d7c-tsdlh" event={"ID":"8153c48a-65e5-4525-b3ca-4dba83d94681","Type":"ContainerStarted","Data":"c326432da8ac46fcc278b4e68d6bf8adbdc33df2c2102fab16bf09e99eeb7ab6"}
Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.768851 4932 generic.go:334] "Generic (PLEG): container finished" podID="0f0aa1af-46c3-4583-9140-149dddf9b048" containerID="44c20a6528a98cf85e7eddf8dc1dcf7d3891a50d73c5bee0ebfb78f28694c629" exitCode=0
Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.768914 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-p579r" event={"ID":"0f0aa1af-46c3-4583-9140-149dddf9b048","Type":"ContainerDied","Data":"44c20a6528a98cf85e7eddf8dc1dcf7d3891a50d73c5bee0ebfb78f28694c629"}
Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.772964 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"230dcb38-b2f4-45fa-91a9-46c11fc57e3d","Type":"ContainerStarted","Data":"8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac"}
Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.819230 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.819204856 podStartE2EDuration="5.819204856s" podCreationTimestamp="2025-11-25 09:08:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:03.785617152 +0000 UTC m=+1203.911646725" watchObservedRunningTime="2025-11-25 09:09:03.819204856 +0000 UTC m=+1203.945234419"
Nov 25 09:09:03 crc kubenswrapper[4932]: I1125 09:09:03.823396 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.823383366 podStartE2EDuration="3.823383366s" podCreationTimestamp="2025-11-25 09:09:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:03.812671879 +0000 UTC m=+1203.938701452" watchObservedRunningTime="2025-11-25 09:09:03.823383366 +0000 UTC m=+1203.949412929"
Nov 25 09:09:04 crc kubenswrapper[4932]: I1125 09:09:04.782208 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod"
pod="openstack/keystone-6fb96c5d7c-tsdlh" event={"ID":"8153c48a-65e5-4525-b3ca-4dba83d94681","Type":"ContainerStarted","Data":"cf9486063626577ad9657d77cfb72663e93d27944dae23244b9e36f69d66b24d"} Nov 25 09:09:04 crc kubenswrapper[4932]: I1125 09:09:04.811912 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6fb96c5d7c-tsdlh" podStartSLOduration=2.811893886 podStartE2EDuration="2.811893886s" podCreationTimestamp="2025-11-25 09:09:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:04.804087051 +0000 UTC m=+1204.930116614" watchObservedRunningTime="2025-11-25 09:09:04.811893886 +0000 UTC m=+1204.937923449" Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.126690 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-p579r" Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.286127 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0f0aa1af-46c3-4583-9140-149dddf9b048-db-sync-config-data\") pod \"0f0aa1af-46c3-4583-9140-149dddf9b048\" (UID: \"0f0aa1af-46c3-4583-9140-149dddf9b048\") " Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.286530 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f0aa1af-46c3-4583-9140-149dddf9b048-combined-ca-bundle\") pod \"0f0aa1af-46c3-4583-9140-149dddf9b048\" (UID: \"0f0aa1af-46c3-4583-9140-149dddf9b048\") " Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.286569 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srp5z\" (UniqueName: \"kubernetes.io/projected/0f0aa1af-46c3-4583-9140-149dddf9b048-kube-api-access-srp5z\") pod \"0f0aa1af-46c3-4583-9140-149dddf9b048\" (UID: \"0f0aa1af-46c3-4583-9140-149dddf9b048\") " Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.294646 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f0aa1af-46c3-4583-9140-149dddf9b048-kube-api-access-srp5z" (OuterVolumeSpecName: "kube-api-access-srp5z") pod "0f0aa1af-46c3-4583-9140-149dddf9b048" (UID: "0f0aa1af-46c3-4583-9140-149dddf9b048"). InnerVolumeSpecName "kube-api-access-srp5z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.298462 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f0aa1af-46c3-4583-9140-149dddf9b048-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "0f0aa1af-46c3-4583-9140-149dddf9b048" (UID: "0f0aa1af-46c3-4583-9140-149dddf9b048"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.321936 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f0aa1af-46c3-4583-9140-149dddf9b048-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0f0aa1af-46c3-4583-9140-149dddf9b048" (UID: "0f0aa1af-46c3-4583-9140-149dddf9b048"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.389396 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f0aa1af-46c3-4583-9140-149dddf9b048-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.389434 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srp5z\" (UniqueName: \"kubernetes.io/projected/0f0aa1af-46c3-4583-9140-149dddf9b048-kube-api-access-srp5z\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.389447 4932 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/0f0aa1af-46c3-4583-9140-149dddf9b048-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.792872 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-p579r" Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.797424 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-p579r" event={"ID":"0f0aa1af-46c3-4583-9140-149dddf9b048","Type":"ContainerDied","Data":"f550a4806e0785ad0522b57a70eefee34e9ef89616881712a28263a57fc6975d"} Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.797461 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f550a4806e0785ad0522b57a70eefee34e9ef89616881712a28263a57fc6975d" Nov 25 09:09:05 crc kubenswrapper[4932]: I1125 09:09:05.797484 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.073479 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6d444df75c-9wqvx"] Nov 25 09:09:06 crc kubenswrapper[4932]: E1125 09:09:06.073883 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f0aa1af-46c3-4583-9140-149dddf9b048" containerName="barbican-db-sync" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.073902 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f0aa1af-46c3-4583-9140-149dddf9b048" containerName="barbican-db-sync" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.074130 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f0aa1af-46c3-4583-9140-149dddf9b048" containerName="barbican-db-sync" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.076633 4932 util.go:30] "No sandbox for pod can be found. 
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.076633 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6d444df75c-9wqvx"
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.088837 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-8jckn"
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.091303 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.091531 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data"
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.102138 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6d444df75c-9wqvx"]
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.134957 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-76dfd47846-vpn45"]
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.136542 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45"
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.140031 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data"
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.158095 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-76dfd47846-vpn45"]
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.177004 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55d78b665f-95bxl"]
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.178448 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55d78b665f-95bxl"
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.210624 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmllz\" (UniqueName: \"kubernetes.io/projected/d1c39090-1743-40c3-95d5-71f5ca126c96-kube-api-access-bmllz\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx"
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.211816 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1c39090-1743-40c3-95d5-71f5ca126c96-logs\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx"
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.211888 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-combined-ca-bundle\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx"
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.211936 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-config-data-custom\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx"
Nov 25 09:09:06 crc kubenswrapper[4932]: I1125
09:09:06.212027 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-config-data\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.214962 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55d78b665f-95bxl"] Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314635 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-config-data\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314705 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmllz\" (UniqueName: \"kubernetes.io/projected/d1c39090-1743-40c3-95d5-71f5ca126c96-kube-api-access-bmllz\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314736 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1c39090-1743-40c3-95d5-71f5ca126c96-logs\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314762 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data-custom\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314804 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-dns-svc\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314837 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-combined-ca-bundle\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314857 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jzsz\" (UniqueName: \"kubernetes.io/projected/a83ee8ae-69d7-4ca5-ade1-9d2450880338-kube-api-access-2jzsz\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314882 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-ovsdbserver-sb\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314902 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjjjt\" (UniqueName: \"kubernetes.io/projected/d4c020e1-f32a-4d5a-8059-63537459377a-kube-api-access-jjjjt\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314921 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-config-data-custom\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314938 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a83ee8ae-69d7-4ca5-ade1-9d2450880338-logs\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314966 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.314986 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-config\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.315011 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-combined-ca-bundle\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.315033 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-ovsdbserver-nb\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.317521 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1c39090-1743-40c3-95d5-71f5ca126c96-logs\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:09:06 crc 
kubenswrapper[4932]: I1125 09:09:06.317751 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5dd5649696-p9hnq"] Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.321017 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-config-data-custom\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.321454 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.322248 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-combined-ca-bundle\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.324799 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-config-data\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.329722 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.333788 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5dd5649696-p9hnq"] Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.336071 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmllz\" (UniqueName: \"kubernetes.io/projected/d1c39090-1743-40c3-95d5-71f5ca126c96-kube-api-access-bmllz\") pod \"barbican-worker-6d444df75c-9wqvx\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") " pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.408220 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.418706 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jzsz\" (UniqueName: \"kubernetes.io/projected/a83ee8ae-69d7-4ca5-ade1-9d2450880338-kube-api-access-2jzsz\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.419619 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-ovsdbserver-sb\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.419689 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-config-data-custom\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.419970 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-logs\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.420011 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjjjt\" (UniqueName: \"kubernetes.io/projected/d4c020e1-f32a-4d5a-8059-63537459377a-kube-api-access-jjjjt\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.420148 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a83ee8ae-69d7-4ca5-ade1-9d2450880338-logs\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.420299 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-combined-ca-bundle\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.420491 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.420671 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-config-data\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.420809 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-config\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.420882 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a83ee8ae-69d7-4ca5-ade1-9d2450880338-logs\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.420996 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-combined-ca-bundle\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.421170 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-ovsdbserver-nb\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.421800 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gs9t9\" (UniqueName: \"kubernetes.io/projected/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-kube-api-access-gs9t9\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.421960 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-config\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.421989 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-ovsdbserver-sb\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.421991 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data-custom\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.422133 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-dns-svc\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.426873 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data-custom\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.427270 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-ovsdbserver-nb\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.427817 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-dns-svc\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.433826 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.436736 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-combined-ca-bundle\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.437277 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jzsz\" (UniqueName: \"kubernetes.io/projected/a83ee8ae-69d7-4ca5-ade1-9d2450880338-kube-api-access-2jzsz\") pod \"barbican-keystone-listener-76dfd47846-vpn45\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") " pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.442977 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjjjt\" (UniqueName: \"kubernetes.io/projected/d4c020e1-f32a-4d5a-8059-63537459377a-kube-api-access-jjjjt\") pod \"dnsmasq-dns-55d78b665f-95bxl\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.458913 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.505633 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.527492 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-config-data-custom\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.527565 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-logs\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.527623 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-combined-ca-bundle\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.527663 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-config-data\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.527781 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gs9t9\" (UniqueName: \"kubernetes.io/projected/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-kube-api-access-gs9t9\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.528683 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-logs\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.533791 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-combined-ca-bundle\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.534951 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-config-data\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.538074 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-config-data-custom\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc 
kubenswrapper[4932]: I1125 09:09:06.547881 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gs9t9\" (UniqueName: \"kubernetes.io/projected/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-kube-api-access-gs9t9\") pod \"barbican-api-5dd5649696-p9hnq\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.709878 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:06 crc kubenswrapper[4932]: I1125 09:09:06.973761 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6d444df75c-9wqvx"] Nov 25 09:09:06 crc kubenswrapper[4932]: W1125 09:09:06.981326 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1c39090_1743_40c3_95d5_71f5ca126c96.slice/crio-1f07dc6e3934d752e43ef44bc4ca25b8cd98c2b8b4282bb5a0f98fea0d0ddebf WatchSource:0}: Error finding container 1f07dc6e3934d752e43ef44bc4ca25b8cd98c2b8b4282bb5a0f98fea0d0ddebf: Status 404 returned error can't find the container with id 1f07dc6e3934d752e43ef44bc4ca25b8cd98c2b8b4282bb5a0f98fea0d0ddebf Nov 25 09:09:07 crc kubenswrapper[4932]: I1125 09:09:07.017915 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-76dfd47846-vpn45"] Nov 25 09:09:07 crc kubenswrapper[4932]: W1125 09:09:07.018603 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda83ee8ae_69d7_4ca5_ade1_9d2450880338.slice/crio-891b2037453285938f63ad02841b01b04841feabe49d02f26e783bcd24edc323 WatchSource:0}: Error finding container 891b2037453285938f63ad02841b01b04841feabe49d02f26e783bcd24edc323: Status 404 returned error can't find the container with id 891b2037453285938f63ad02841b01b04841feabe49d02f26e783bcd24edc323 Nov 25 09:09:07 crc kubenswrapper[4932]: I1125 09:09:07.086908 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55d78b665f-95bxl"] Nov 25 09:09:07 crc kubenswrapper[4932]: W1125 09:09:07.092865 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4c020e1_f32a_4d5a_8059_63537459377a.slice/crio-9488ac0c965a337ae8683d7dba08068dd43252e5c8c0a9c2f4bf0c9b073aefa7 WatchSource:0}: Error finding container 9488ac0c965a337ae8683d7dba08068dd43252e5c8c0a9c2f4bf0c9b073aefa7: Status 404 returned error can't find the container with id 9488ac0c965a337ae8683d7dba08068dd43252e5c8c0a9c2f4bf0c9b073aefa7 Nov 25 09:09:07 crc kubenswrapper[4932]: I1125 09:09:07.308885 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5dd5649696-p9hnq"] Nov 25 09:09:07 crc kubenswrapper[4932]: W1125 09:09:07.310182 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3dd629e_01fc_495c_926b_b72c5fe0f2f9.slice/crio-261a9bf12c45043cc00867a433d34a9d527b4484f8b06ea117f40887c3434855 WatchSource:0}: Error finding container 261a9bf12c45043cc00867a433d34a9d527b4484f8b06ea117f40887c3434855: Status 404 returned error can't find the container with id 261a9bf12c45043cc00867a433d34a9d527b4484f8b06ea117f40887c3434855 Nov 25 09:09:07 crc kubenswrapper[4932]: I1125 09:09:07.827032 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-worker-6d444df75c-9wqvx" event={"ID":"d1c39090-1743-40c3-95d5-71f5ca126c96","Type":"ContainerStarted","Data":"1f07dc6e3934d752e43ef44bc4ca25b8cd98c2b8b4282bb5a0f98fea0d0ddebf"} Nov 25 09:09:07 crc kubenswrapper[4932]: I1125 09:09:07.828586 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5dd5649696-p9hnq" event={"ID":"a3dd629e-01fc-495c-926b-b72c5fe0f2f9","Type":"ContainerStarted","Data":"261a9bf12c45043cc00867a433d34a9d527b4484f8b06ea117f40887c3434855"} Nov 25 09:09:07 crc kubenswrapper[4932]: I1125 09:09:07.830328 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55d78b665f-95bxl" event={"ID":"d4c020e1-f32a-4d5a-8059-63537459377a","Type":"ContainerStarted","Data":"9488ac0c965a337ae8683d7dba08068dd43252e5c8c0a9c2f4bf0c9b073aefa7"} Nov 25 09:09:07 crc kubenswrapper[4932]: I1125 09:09:07.831301 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" event={"ID":"a83ee8ae-69d7-4ca5-ade1-9d2450880338","Type":"ContainerStarted","Data":"891b2037453285938f63ad02841b01b04841feabe49d02f26e783bcd24edc323"} Nov 25 09:09:08 crc kubenswrapper[4932]: E1125 09:09:08.610072 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75\\\"\"]" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" Nov 25 09:09:08 crc kubenswrapper[4932]: I1125 09:09:08.841866 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55d78b665f-95bxl" event={"ID":"d4c020e1-f32a-4d5a-8059-63537459377a","Type":"ContainerStarted","Data":"09c1eb9b9ad975ba91e201862d4a2ad1fadea8b0956bc947e95beab4e25dd4c0"} Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.061679 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.061724 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.091788 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.099739 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.363649 4932 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/barbican-api-6c674848fb-kcq2h"] Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.365409 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.367363 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.367665 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.375553 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6c674848fb-kcq2h"] Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.501002 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-combined-ca-bundle\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.501069 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qx2rr\" (UniqueName: \"kubernetes.io/projected/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-kube-api-access-qx2rr\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.501099 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-internal-tls-certs\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.501137 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-config-data\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.501155 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-config-data-custom\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.501174 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-public-tls-certs\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.501210 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-logs\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: 
\"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.603059 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qx2rr\" (UniqueName: \"kubernetes.io/projected/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-kube-api-access-qx2rr\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.603128 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-internal-tls-certs\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.603200 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-config-data\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.603228 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-config-data-custom\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.603250 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-public-tls-certs\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.603276 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-logs\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.603384 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-combined-ca-bundle\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.604131 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-logs\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.609117 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-internal-tls-certs\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " 
pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.609882 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-combined-ca-bundle\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.610324 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-config-data\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.610481 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-public-tls-certs\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.619776 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-config-data-custom\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.624081 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qx2rr\" (UniqueName: \"kubernetes.io/projected/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-kube-api-access-qx2rr\") pod \"barbican-api-6c674848fb-kcq2h\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.703989 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.857377 4932 generic.go:334] "Generic (PLEG): container finished" podID="d4c020e1-f32a-4d5a-8059-63537459377a" containerID="09c1eb9b9ad975ba91e201862d4a2ad1fadea8b0956bc947e95beab4e25dd4c0" exitCode=0 Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.857426 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55d78b665f-95bxl" event={"ID":"d4c020e1-f32a-4d5a-8059-63537459377a","Type":"ContainerDied","Data":"09c1eb9b9ad975ba91e201862d4a2ad1fadea8b0956bc947e95beab4e25dd4c0"} Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.870765 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5dd5649696-p9hnq" event={"ID":"a3dd629e-01fc-495c-926b-b72c5fe0f2f9","Type":"ContainerStarted","Data":"7093803f813bbb4df669ea90b501429a8e528338fb2a6a95b35b3c6bd0926337"} Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.871234 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 09:09:09 crc kubenswrapper[4932]: I1125 09:09:09.871266 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 09:09:10 crc kubenswrapper[4932]: I1125 09:09:10.156735 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6c674848fb-kcq2h"] Nov 25 09:09:10 crc kubenswrapper[4932]: I1125 09:09:10.880888 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c674848fb-kcq2h" event={"ID":"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee","Type":"ContainerStarted","Data":"9bfc0f6aeca199054cc5f74846dd1737267b925812ff8c39383aa7003f0745b2"} Nov 25 09:09:11 crc kubenswrapper[4932]: I1125 09:09:11.064732 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 09:09:11 crc kubenswrapper[4932]: I1125 09:09:11.064793 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 09:09:11 crc kubenswrapper[4932]: I1125 09:09:11.096288 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 09:09:11 crc kubenswrapper[4932]: I1125 09:09:11.125281 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 09:09:11 crc kubenswrapper[4932]: I1125 09:09:11.881700 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 09:09:11 crc kubenswrapper[4932]: I1125 09:09:11.882164 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 09:09:11 crc kubenswrapper[4932]: I1125 09:09:11.891546 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c674848fb-kcq2h" event={"ID":"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee","Type":"ContainerStarted","Data":"a88145623badf6b75935e3c66fd27e243c456b8c75fa4980f833fcbd15313f78"} Nov 25 09:09:11 crc kubenswrapper[4932]: I1125 09:09:11.894323 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5dd5649696-p9hnq" event={"ID":"a3dd629e-01fc-495c-926b-b72c5fe0f2f9","Type":"ContainerStarted","Data":"4b5bdc81472d27e076b36331c04dfd6e9c73a73688ac1fe3b237d0b32771a570"} Nov 25 09:09:11 crc 
kubenswrapper[4932]: I1125 09:09:11.894940 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 09:09:11 crc kubenswrapper[4932]: I1125 09:09:11.895284 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 09:09:12 crc kubenswrapper[4932]: I1125 09:09:12.935392 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5dd5649696-p9hnq" podStartSLOduration=6.93536772 podStartE2EDuration="6.93536772s" podCreationTimestamp="2025-11-25 09:09:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:12.92314717 +0000 UTC m=+1213.049176733" watchObservedRunningTime="2025-11-25 09:09:12.93536772 +0000 UTC m=+1213.061397283" Nov 25 09:09:13 crc kubenswrapper[4932]: I1125 09:09:13.972480 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 09:09:13 crc kubenswrapper[4932]: I1125 09:09:13.972908 4932 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 09:09:13 crc kubenswrapper[4932]: I1125 09:09:13.987785 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 09:09:16 crc kubenswrapper[4932]: I1125 09:09:16.711474 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:16 crc kubenswrapper[4932]: I1125 09:09:16.712108 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:20 crc kubenswrapper[4932]: I1125 09:09:20.686430 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5dd5649696-p9hnq" podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerName="barbican-api" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:09:20 crc kubenswrapper[4932]: I1125 09:09:20.694419 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5dd5649696-p9hnq" podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerName="barbican-api-log" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 09:09:22 crc kubenswrapper[4932]: I1125 09:09:22.479877 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:22 crc kubenswrapper[4932]: I1125 09:09:22.481642 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:22 crc kubenswrapper[4932]: E1125 09:09:22.739334 4932 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/ubi9/httpd-24@sha256:8536169e5537fe6c330eba814248abdcf39cdd8f7e7336034d74e6fda9544050" Nov 25 09:09:22 crc kubenswrapper[4932]: E1125 09:09:22.739526 4932 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:proxy-httpd,Image:registry.redhat.io/ubi9/httpd-24@sha256:8536169e5537fe6c330eba814248abdcf39cdd8f7e7336034d74e6fda9544050,Command:[/usr/sbin/httpd],Args:[-DFOREGROUND],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:proxy-httpd,HostPort:0,ContainerPort:3000,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf/httpd.conf,SubPath:httpd.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/httpd/conf.d/ssl.conf,SubPath:ssl.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:run-httpd,ReadOnly:false,MountPath:/run/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:log-httpd,ReadOnly:false,MountPath:/var/log/httpd,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bfwws,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/,Port:{0 3000 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(9bb5dd5d-4c94-434e-880d-f47d84a21724): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 25 09:09:22 crc kubenswrapper[4932]: E1125 09:09:22.740980 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"ceilometer-notification-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"proxy-httpd\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"]" pod="openstack/ceilometer-0" podUID="9bb5dd5d-4c94-434e-880d-f47d84a21724" Nov 25 09:09:23 crc kubenswrapper[4932]: 
I1125 09:09:23.009225 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9bb5dd5d-4c94-434e-880d-f47d84a21724" containerName="sg-core" containerID="cri-o://81a8ed2afc7ba1b9a6e31c72b4fb9aefb28e71a15bd4787500868d40d4ede1ba" gracePeriod=30 Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.018096 4932 generic.go:334] "Generic (PLEG): container finished" podID="9bb5dd5d-4c94-434e-880d-f47d84a21724" containerID="81a8ed2afc7ba1b9a6e31c72b4fb9aefb28e71a15bd4787500868d40d4ede1ba" exitCode=2 Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.018135 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9bb5dd5d-4c94-434e-880d-f47d84a21724","Type":"ContainerDied","Data":"81a8ed2afc7ba1b9a6e31c72b4fb9aefb28e71a15bd4787500868d40d4ede1ba"} Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.449781 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.517486 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfwws\" (UniqueName: \"kubernetes.io/projected/9bb5dd5d-4c94-434e-880d-f47d84a21724-kube-api-access-bfwws\") pod \"9bb5dd5d-4c94-434e-880d-f47d84a21724\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.517632 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-sg-core-conf-yaml\") pod \"9bb5dd5d-4c94-434e-880d-f47d84a21724\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.517721 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-scripts\") pod \"9bb5dd5d-4c94-434e-880d-f47d84a21724\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.517749 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-combined-ca-bundle\") pod \"9bb5dd5d-4c94-434e-880d-f47d84a21724\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.517802 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bb5dd5d-4c94-434e-880d-f47d84a21724-log-httpd\") pod \"9bb5dd5d-4c94-434e-880d-f47d84a21724\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.517832 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bb5dd5d-4c94-434e-880d-f47d84a21724-run-httpd\") pod \"9bb5dd5d-4c94-434e-880d-f47d84a21724\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.517866 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-config-data\") pod \"9bb5dd5d-4c94-434e-880d-f47d84a21724\" (UID: \"9bb5dd5d-4c94-434e-880d-f47d84a21724\") " Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.519117 4932 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bb5dd5d-4c94-434e-880d-f47d84a21724-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9bb5dd5d-4c94-434e-880d-f47d84a21724" (UID: "9bb5dd5d-4c94-434e-880d-f47d84a21724"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.519624 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bb5dd5d-4c94-434e-880d-f47d84a21724-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9bb5dd5d-4c94-434e-880d-f47d84a21724" (UID: "9bb5dd5d-4c94-434e-880d-f47d84a21724"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.523985 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-config-data" (OuterVolumeSpecName: "config-data") pod "9bb5dd5d-4c94-434e-880d-f47d84a21724" (UID: "9bb5dd5d-4c94-434e-880d-f47d84a21724"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.524140 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-scripts" (OuterVolumeSpecName: "scripts") pod "9bb5dd5d-4c94-434e-880d-f47d84a21724" (UID: "9bb5dd5d-4c94-434e-880d-f47d84a21724"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.524574 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bb5dd5d-4c94-434e-880d-f47d84a21724-kube-api-access-bfwws" (OuterVolumeSpecName: "kube-api-access-bfwws") pod "9bb5dd5d-4c94-434e-880d-f47d84a21724" (UID: "9bb5dd5d-4c94-434e-880d-f47d84a21724"). InnerVolumeSpecName "kube-api-access-bfwws". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.524928 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9bb5dd5d-4c94-434e-880d-f47d84a21724" (UID: "9bb5dd5d-4c94-434e-880d-f47d84a21724"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.545451 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9bb5dd5d-4c94-434e-880d-f47d84a21724" (UID: "9bb5dd5d-4c94-434e-880d-f47d84a21724"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.619927 4932 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.619962 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.619978 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.619988 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bb5dd5d-4c94-434e-880d-f47d84a21724-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.619999 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9bb5dd5d-4c94-434e-880d-f47d84a21724-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.620009 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9bb5dd5d-4c94-434e-880d-f47d84a21724-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:24 crc kubenswrapper[4932]: I1125 09:09:24.620019 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfwws\" (UniqueName: \"kubernetes.io/projected/9bb5dd5d-4c94-434e-880d-f47d84a21724-kube-api-access-bfwws\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.031681 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c674848fb-kcq2h" event={"ID":"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee","Type":"ContainerStarted","Data":"bcb0b33d20667e08d805c88572654c89aed61e0f969c78fc5ef9ec57be99532f"} Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.032093 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.032108 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.033495 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6c674848fb-kcq2h" podUID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.156:9311/healthcheck\": dial tcp 10.217.0.156:9311: connect: connection refused" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.034514 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55d78b665f-95bxl" event={"ID":"d4c020e1-f32a-4d5a-8059-63537459377a","Type":"ContainerStarted","Data":"046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b"} Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.035301 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.037641 4932 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/ceilometer-0" event={"ID":"9bb5dd5d-4c94-434e-880d-f47d84a21724","Type":"ContainerDied","Data":"18e2abf1d4004e7d41a67c4d81e029de1b01e115126e8fa57418e6d53bbe2fca"} Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.037695 4932 scope.go:117] "RemoveContainer" containerID="81a8ed2afc7ba1b9a6e31c72b4fb9aefb28e71a15bd4787500868d40d4ede1ba" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.037711 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.085870 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6c674848fb-kcq2h" podStartSLOduration=16.085846711 podStartE2EDuration="16.085846711s" podCreationTimestamp="2025-11-25 09:09:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:25.051266408 +0000 UTC m=+1225.177295981" watchObservedRunningTime="2025-11-25 09:09:25.085846711 +0000 UTC m=+1225.211876274" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.127050 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55d78b665f-95bxl" podStartSLOduration=19.12414207 podStartE2EDuration="19.12414207s" podCreationTimestamp="2025-11-25 09:09:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:25.072043044 +0000 UTC m=+1225.198072627" watchObservedRunningTime="2025-11-25 09:09:25.12414207 +0000 UTC m=+1225.250171633" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.170134 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.182092 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.202265 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:09:25 crc kubenswrapper[4932]: E1125 09:09:25.202698 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bb5dd5d-4c94-434e-880d-f47d84a21724" containerName="sg-core" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.202723 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bb5dd5d-4c94-434e-880d-f47d84a21724" containerName="sg-core" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.202912 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bb5dd5d-4c94-434e-880d-f47d84a21724" containerName="sg-core" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.205539 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.213698 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.213920 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.221264 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.335923 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-run-httpd\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.336303 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-log-httpd\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.336424 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.336653 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.336740 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k882j\" (UniqueName: \"kubernetes.io/projected/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-kube-api-access-k882j\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.336859 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-config-data\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.336937 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-scripts\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.446777 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-log-httpd\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.446871 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.446890 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.446915 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k882j\" (UniqueName: \"kubernetes.io/projected/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-kube-api-access-k882j\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.446957 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-config-data\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.446978 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-scripts\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.447008 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-run-httpd\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.447472 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-run-httpd\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.447692 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-log-httpd\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.459785 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.460717 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-config-data\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.465848 4932 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.470460 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-scripts\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.481219 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k882j\" (UniqueName: \"kubernetes.io/projected/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-kube-api-access-k882j\") pod \"ceilometer-0\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " pod="openstack/ceilometer-0" Nov 25 09:09:25 crc kubenswrapper[4932]: I1125 09:09:25.548213 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:09:26 crc kubenswrapper[4932]: I1125 09:09:26.029275 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:09:26 crc kubenswrapper[4932]: I1125 09:09:26.050009 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6d444df75c-9wqvx" event={"ID":"d1c39090-1743-40c3-95d5-71f5ca126c96","Type":"ContainerStarted","Data":"abe66e4f341b24534642787b92c4263f4ebf66e15aa3b9d673ff051b62fba4b5"} Nov 25 09:09:26 crc kubenswrapper[4932]: I1125 09:09:26.050058 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6d444df75c-9wqvx" event={"ID":"d1c39090-1743-40c3-95d5-71f5ca126c96","Type":"ContainerStarted","Data":"319658ac79c4fb4fcd46ed313645d6769272569f11fd5e0e78f4b23b5fcf4935"} Nov 25 09:09:26 crc kubenswrapper[4932]: I1125 09:09:26.051363 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"712f11c0-89ae-4e39-b6c6-3bb7303ccaae","Type":"ContainerStarted","Data":"d810a46699183fdd7a84d5f6d4758de112ef4594ada74290219dfd44e3cc4c26"} Nov 25 09:09:26 crc kubenswrapper[4932]: I1125 09:09:26.053123 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" event={"ID":"a83ee8ae-69d7-4ca5-ade1-9d2450880338","Type":"ContainerStarted","Data":"be76288d747fd77398730e153b2bfa8b05e410e8971bd296d8c9d0bb4df3ac3b"} Nov 25 09:09:26 crc kubenswrapper[4932]: I1125 09:09:26.053161 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" event={"ID":"a83ee8ae-69d7-4ca5-ade1-9d2450880338","Type":"ContainerStarted","Data":"905fc878d7a680a212ca79f470646dd7111019ec6d24cae51d0d6adfba1d2500"} Nov 25 09:09:26 crc kubenswrapper[4932]: I1125 09:09:26.075595 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"568484e69b9c4127e15f009bf0e5694d15ca4b6ae5a35b7503f084c9adb3e9a3"} Nov 25 09:09:26 crc kubenswrapper[4932]: I1125 09:09:26.075644 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"d77ea0e7a1509cc988fded84ce9cd4dc66e884a9b6f07ad09301588d2897762e"} Nov 25 09:09:26 crc kubenswrapper[4932]: I1125 09:09:26.088638 4932 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" podStartSLOduration=2.359784625 podStartE2EDuration="20.088619899s" podCreationTimestamp="2025-11-25 09:09:06 +0000 UTC" firstStartedPulling="2025-11-25 09:09:07.0258954 +0000 UTC m=+1207.151924963" lastFinishedPulling="2025-11-25 09:09:24.754730674 +0000 UTC m=+1224.880760237" observedRunningTime="2025-11-25 09:09:26.087911139 +0000 UTC m=+1226.213940712" watchObservedRunningTime="2025-11-25 09:09:26.088619899 +0000 UTC m=+1226.214649462" Nov 25 09:09:26 crc kubenswrapper[4932]: I1125 09:09:26.097300 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lc5vk" event={"ID":"e14c1b6a-a83b-47fc-8fac-36468c1b4df5","Type":"ContainerStarted","Data":"044d1296bb65dfef08ae69e4e66aaaf33fef1c2827f6ab1bc28df180a80213c7"} Nov 25 09:09:26 crc kubenswrapper[4932]: I1125 09:09:26.137873 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-lc5vk" podStartSLOduration=3.120406533 podStartE2EDuration="1m2.137847183s" podCreationTimestamp="2025-11-25 09:08:24 +0000 UTC" firstStartedPulling="2025-11-25 09:08:25.736929884 +0000 UTC m=+1165.862959437" lastFinishedPulling="2025-11-25 09:09:24.754370534 +0000 UTC m=+1224.880400087" observedRunningTime="2025-11-25 09:09:26.133629941 +0000 UTC m=+1226.259659504" watchObservedRunningTime="2025-11-25 09:09:26.137847183 +0000 UTC m=+1226.263876746" Nov 25 09:09:26 crc kubenswrapper[4932]: I1125 09:09:26.629021 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bb5dd5d-4c94-434e-880d-f47d84a21724" path="/var/lib/kubelet/pods/9bb5dd5d-4c94-434e-880d-f47d84a21724/volumes" Nov 25 09:09:27 crc kubenswrapper[4932]: I1125 09:09:27.109863 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"8418a97dfcdcedf4b5696213eb9548d1ede0f2e23cfc955f8dc8202263735b8a"} Nov 25 09:09:27 crc kubenswrapper[4932]: I1125 09:09:27.141431 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6d444df75c-9wqvx" podStartSLOduration=3.3742500509999998 podStartE2EDuration="21.141409175s" podCreationTimestamp="2025-11-25 09:09:06 +0000 UTC" firstStartedPulling="2025-11-25 09:09:06.987371524 +0000 UTC m=+1207.113401087" lastFinishedPulling="2025-11-25 09:09:24.754530648 +0000 UTC m=+1224.880560211" observedRunningTime="2025-11-25 09:09:27.127707632 +0000 UTC m=+1227.253737195" watchObservedRunningTime="2025-11-25 09:09:27.141409175 +0000 UTC m=+1227.267438738" Nov 25 09:09:27 crc kubenswrapper[4932]: I1125 09:09:27.651746 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.126000 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerStarted","Data":"5ae59b9454a13af8d9b95946eddb65b72cf6eb58ca8f5a5c793ecae3ee358a2f"} Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.128597 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"712f11c0-89ae-4e39-b6c6-3bb7303ccaae","Type":"ContainerStarted","Data":"354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55"} Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.159478 4932 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/swift-storage-0" podStartSLOduration=40.59882457 podStartE2EDuration="2m18.159462664s" podCreationTimestamp="2025-11-25 09:07:10 +0000 UTC" firstStartedPulling="2025-11-25 09:07:47.734373982 +0000 UTC m=+1127.860403545" lastFinishedPulling="2025-11-25 09:09:25.295012076 +0000 UTC m=+1225.421041639" observedRunningTime="2025-11-25 09:09:28.157868798 +0000 UTC m=+1228.283898371" watchObservedRunningTime="2025-11-25 09:09:28.159462664 +0000 UTC m=+1228.285492217" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.441811 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55d78b665f-95bxl"] Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.442039 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55d78b665f-95bxl" podUID="d4c020e1-f32a-4d5a-8059-63537459377a" containerName="dnsmasq-dns" containerID="cri-o://046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b" gracePeriod=10 Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.469120 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8b9b87645-8ngpb"] Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.470635 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.472746 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.488076 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b9b87645-8ngpb"] Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.617621 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-ovsdbserver-nb\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.617671 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49bw5\" (UniqueName: \"kubernetes.io/projected/2ea9c914-3d06-40d7-92f3-56f27f6c8900-kube-api-access-49bw5\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.617709 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-config\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.617727 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-dns-swift-storage-0\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.617752 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-dns-svc\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.617778 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-ovsdbserver-sb\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.719181 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-ovsdbserver-nb\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.719280 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49bw5\" (UniqueName: \"kubernetes.io/projected/2ea9c914-3d06-40d7-92f3-56f27f6c8900-kube-api-access-49bw5\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.719372 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-config\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.719411 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-dns-swift-storage-0\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.719459 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-dns-svc\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.719528 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-ovsdbserver-sb\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.720259 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-ovsdbserver-nb\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.720302 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-dns-swift-storage-0\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.720959 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-ovsdbserver-sb\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.720979 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-dns-svc\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.720976 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-config\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.754544 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49bw5\" (UniqueName: \"kubernetes.io/projected/2ea9c914-3d06-40d7-92f3-56f27f6c8900-kube-api-access-49bw5\") pod \"dnsmasq-dns-8b9b87645-8ngpb\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:28 crc kubenswrapper[4932]: I1125 09:09:28.841920 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.012470 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.130089 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-ovsdbserver-nb\") pod \"d4c020e1-f32a-4d5a-8059-63537459377a\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.130138 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjjjt\" (UniqueName: \"kubernetes.io/projected/d4c020e1-f32a-4d5a-8059-63537459377a-kube-api-access-jjjjt\") pod \"d4c020e1-f32a-4d5a-8059-63537459377a\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.130282 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-config\") pod \"d4c020e1-f32a-4d5a-8059-63537459377a\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.130323 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-ovsdbserver-sb\") pod \"d4c020e1-f32a-4d5a-8059-63537459377a\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.130415 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-dns-svc\") pod \"d4c020e1-f32a-4d5a-8059-63537459377a\" (UID: \"d4c020e1-f32a-4d5a-8059-63537459377a\") " Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.135501 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4c020e1-f32a-4d5a-8059-63537459377a-kube-api-access-jjjjt" (OuterVolumeSpecName: "kube-api-access-jjjjt") pod "d4c020e1-f32a-4d5a-8059-63537459377a" (UID: "d4c020e1-f32a-4d5a-8059-63537459377a"). InnerVolumeSpecName "kube-api-access-jjjjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.146066 4932 generic.go:334] "Generic (PLEG): container finished" podID="d4c020e1-f32a-4d5a-8059-63537459377a" containerID="046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b" exitCode=0 Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.148140 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55d78b665f-95bxl" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.148149 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55d78b665f-95bxl" event={"ID":"d4c020e1-f32a-4d5a-8059-63537459377a","Type":"ContainerDied","Data":"046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b"} Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.148286 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55d78b665f-95bxl" event={"ID":"d4c020e1-f32a-4d5a-8059-63537459377a","Type":"ContainerDied","Data":"9488ac0c965a337ae8683d7dba08068dd43252e5c8c0a9c2f4bf0c9b073aefa7"} Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.148307 4932 scope.go:117] "RemoveContainer" containerID="046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.212337 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d4c020e1-f32a-4d5a-8059-63537459377a" (UID: "d4c020e1-f32a-4d5a-8059-63537459377a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.214226 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d4c020e1-f32a-4d5a-8059-63537459377a" (UID: "d4c020e1-f32a-4d5a-8059-63537459377a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.214426 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d4c020e1-f32a-4d5a-8059-63537459377a" (UID: "d4c020e1-f32a-4d5a-8059-63537459377a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.232172 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.232216 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjjjt\" (UniqueName: \"kubernetes.io/projected/d4c020e1-f32a-4d5a-8059-63537459377a-kube-api-access-jjjjt\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.232227 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.232243 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.234198 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-config" (OuterVolumeSpecName: "config") pod "d4c020e1-f32a-4d5a-8059-63537459377a" (UID: "d4c020e1-f32a-4d5a-8059-63537459377a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.276122 4932 scope.go:117] "RemoveContainer" containerID="09c1eb9b9ad975ba91e201862d4a2ad1fadea8b0956bc947e95beab4e25dd4c0" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.313460 4932 scope.go:117] "RemoveContainer" containerID="046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b" Nov 25 09:09:29 crc kubenswrapper[4932]: E1125 09:09:29.317357 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b\": container with ID starting with 046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b not found: ID does not exist" containerID="046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.317405 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b"} err="failed to get container status \"046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b\": rpc error: code = NotFound desc = could not find container \"046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b\": container with ID starting with 046aea8199dd4256d5edaaf66edaea0c5e6b1c34df10bacb84dcdb1efebab03b not found: ID does not exist" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.317429 4932 scope.go:117] "RemoveContainer" containerID="09c1eb9b9ad975ba91e201862d4a2ad1fadea8b0956bc947e95beab4e25dd4c0" Nov 25 09:09:29 crc kubenswrapper[4932]: E1125 09:09:29.321420 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09c1eb9b9ad975ba91e201862d4a2ad1fadea8b0956bc947e95beab4e25dd4c0\": container with ID starting with 09c1eb9b9ad975ba91e201862d4a2ad1fadea8b0956bc947e95beab4e25dd4c0 not found: ID does not exist" 
containerID="09c1eb9b9ad975ba91e201862d4a2ad1fadea8b0956bc947e95beab4e25dd4c0" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.321459 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09c1eb9b9ad975ba91e201862d4a2ad1fadea8b0956bc947e95beab4e25dd4c0"} err="failed to get container status \"09c1eb9b9ad975ba91e201862d4a2ad1fadea8b0956bc947e95beab4e25dd4c0\": rpc error: code = NotFound desc = could not find container \"09c1eb9b9ad975ba91e201862d4a2ad1fadea8b0956bc947e95beab4e25dd4c0\": container with ID starting with 09c1eb9b9ad975ba91e201862d4a2ad1fadea8b0956bc947e95beab4e25dd4c0 not found: ID does not exist" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.333849 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4c020e1-f32a-4d5a-8059-63537459377a-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.392352 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b9b87645-8ngpb"] Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.497248 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55d78b665f-95bxl"] Nov 25 09:09:29 crc kubenswrapper[4932]: I1125 09:09:29.519917 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55d78b665f-95bxl"] Nov 25 09:09:30 crc kubenswrapper[4932]: I1125 09:09:30.157034 4932 generic.go:334] "Generic (PLEG): container finished" podID="2ea9c914-3d06-40d7-92f3-56f27f6c8900" containerID="cff02c37abc945af69175f5d6a5ffd27f5b1d2ec23a8b025ec27782a3980e13d" exitCode=0 Nov 25 09:09:30 crc kubenswrapper[4932]: I1125 09:09:30.157133 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" event={"ID":"2ea9c914-3d06-40d7-92f3-56f27f6c8900","Type":"ContainerDied","Data":"cff02c37abc945af69175f5d6a5ffd27f5b1d2ec23a8b025ec27782a3980e13d"} Nov 25 09:09:30 crc kubenswrapper[4932]: I1125 09:09:30.157532 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" event={"ID":"2ea9c914-3d06-40d7-92f3-56f27f6c8900","Type":"ContainerStarted","Data":"77a9c3921441958cd006aeaac47456bd36d33f67f191ef4aaacb65589ae1bd34"} Nov 25 09:09:30 crc kubenswrapper[4932]: I1125 09:09:30.161916 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"712f11c0-89ae-4e39-b6c6-3bb7303ccaae","Type":"ContainerStarted","Data":"fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65"} Nov 25 09:09:30 crc kubenswrapper[4932]: I1125 09:09:30.656964 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4c020e1-f32a-4d5a-8059-63537459377a" path="/var/lib/kubelet/pods/d4c020e1-f32a-4d5a-8059-63537459377a/volumes" Nov 25 09:09:31 crc kubenswrapper[4932]: I1125 09:09:31.177749 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"712f11c0-89ae-4e39-b6c6-3bb7303ccaae","Type":"ContainerStarted","Data":"6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642"} Nov 25 09:09:31 crc kubenswrapper[4932]: I1125 09:09:31.180106 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" event={"ID":"2ea9c914-3d06-40d7-92f3-56f27f6c8900","Type":"ContainerStarted","Data":"c4cf0d747fb095e351de373eafc0beb87d2b0826cebea5d997c430b78e69d249"} Nov 25 09:09:31 crc kubenswrapper[4932]: I1125 09:09:31.180360 4932 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:31 crc kubenswrapper[4932]: I1125 09:09:31.592642 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:09:31 crc kubenswrapper[4932]: I1125 09:09:31.615966 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" podStartSLOduration=3.6159485890000003 podStartE2EDuration="3.615948589s" podCreationTimestamp="2025-11-25 09:09:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:31.20495337 +0000 UTC m=+1231.330982953" watchObservedRunningTime="2025-11-25 09:09:31.615948589 +0000 UTC m=+1231.741978152" Nov 25 09:09:31 crc kubenswrapper[4932]: I1125 09:09:31.658478 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5dd5649696-p9hnq"] Nov 25 09:09:31 crc kubenswrapper[4932]: I1125 09:09:31.658725 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5dd5649696-p9hnq" podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerName="barbican-api-log" containerID="cri-o://7093803f813bbb4df669ea90b501429a8e528338fb2a6a95b35b3c6bd0926337" gracePeriod=30 Nov 25 09:09:31 crc kubenswrapper[4932]: I1125 09:09:31.663246 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5dd5649696-p9hnq" podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerName="barbican-api" containerID="cri-o://4b5bdc81472d27e076b36331c04dfd6e9c73a73688ac1fe3b237d0b32771a570" gracePeriod=30 Nov 25 09:09:32 crc kubenswrapper[4932]: I1125 09:09:32.189917 4932 generic.go:334] "Generic (PLEG): container finished" podID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerID="7093803f813bbb4df669ea90b501429a8e528338fb2a6a95b35b3c6bd0926337" exitCode=143 Nov 25 09:09:32 crc kubenswrapper[4932]: I1125 09:09:32.190024 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5dd5649696-p9hnq" event={"ID":"a3dd629e-01fc-495c-926b-b72c5fe0f2f9","Type":"ContainerDied","Data":"7093803f813bbb4df669ea90b501429a8e528338fb2a6a95b35b3c6bd0926337"} Nov 25 09:09:34 crc kubenswrapper[4932]: I1125 09:09:34.215137 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"712f11c0-89ae-4e39-b6c6-3bb7303ccaae","Type":"ContainerStarted","Data":"32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539"} Nov 25 09:09:34 crc kubenswrapper[4932]: I1125 09:09:34.215717 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:09:34 crc kubenswrapper[4932]: I1125 09:09:34.240768 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.219655876 podStartE2EDuration="9.240749081s" podCreationTimestamp="2025-11-25 09:09:25 +0000 UTC" firstStartedPulling="2025-11-25 09:09:26.034743603 +0000 UTC m=+1226.160773176" lastFinishedPulling="2025-11-25 09:09:33.055836828 +0000 UTC m=+1233.181866381" observedRunningTime="2025-11-25 09:09:34.232480219 +0000 UTC m=+1234.358509782" watchObservedRunningTime="2025-11-25 09:09:34.240749081 +0000 UTC m=+1234.366778644" Nov 25 09:09:34 crc kubenswrapper[4932]: I1125 09:09:34.861908 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5dd5649696-p9hnq" 
podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.155:9311/healthcheck\": read tcp 10.217.0.2:39932->10.217.0.155:9311: read: connection reset by peer" Nov 25 09:09:34 crc kubenswrapper[4932]: I1125 09:09:34.861913 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5dd5649696-p9hnq" podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.155:9311/healthcheck\": read tcp 10.217.0.2:39924->10.217.0.155:9311: read: connection reset by peer" Nov 25 09:09:34 crc kubenswrapper[4932]: I1125 09:09:34.902778 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.172273 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 09:09:35 crc kubenswrapper[4932]: E1125 09:09:35.173152 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4c020e1-f32a-4d5a-8059-63537459377a" containerName="dnsmasq-dns" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.173263 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4c020e1-f32a-4d5a-8059-63537459377a" containerName="dnsmasq-dns" Nov 25 09:09:35 crc kubenswrapper[4932]: E1125 09:09:35.173280 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4c020e1-f32a-4d5a-8059-63537459377a" containerName="init" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.173287 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4c020e1-f32a-4d5a-8059-63537459377a" containerName="init" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.173905 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4c020e1-f32a-4d5a-8059-63537459377a" containerName="dnsmasq-dns" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.174724 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.180581 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.180780 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.180959 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-rrwlp" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.193319 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.230869 4932 generic.go:334] "Generic (PLEG): container finished" podID="d4a545d2-ff3c-4a27-b210-4803cdbf3c86" containerID="e5ddbce93438e379c217b355c8b70d4d14eedbb1679bbeaf3ff04b9b953a64d0" exitCode=0 Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.230945 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gqn2r" event={"ID":"d4a545d2-ff3c-4a27-b210-4803cdbf3c86","Type":"ContainerDied","Data":"e5ddbce93438e379c217b355c8b70d4d14eedbb1679bbeaf3ff04b9b953a64d0"} Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.233545 4932 generic.go:334] "Generic (PLEG): container finished" podID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerID="4b5bdc81472d27e076b36331c04dfd6e9c73a73688ac1fe3b237d0b32771a570" exitCode=0 Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.234384 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5dd5649696-p9hnq" event={"ID":"a3dd629e-01fc-495c-926b-b72c5fe0f2f9","Type":"ContainerDied","Data":"4b5bdc81472d27e076b36331c04dfd6e9c73a73688ac1fe3b237d0b32771a570"} Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.272129 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/37a05df3-807b-4349-9ef8-d9767774c6f6-openstack-config\") pod \"openstackclient\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.272413 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fjcg\" (UniqueName: \"kubernetes.io/projected/37a05df3-807b-4349-9ef8-d9767774c6f6-kube-api-access-7fjcg\") pod \"openstackclient\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.272570 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/37a05df3-807b-4349-9ef8-d9767774c6f6-openstack-config-secret\") pod \"openstackclient\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.272652 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37a05df3-807b-4349-9ef8-d9767774c6f6-combined-ca-bundle\") pod \"openstackclient\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.367707 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.373930 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/37a05df3-807b-4349-9ef8-d9767774c6f6-openstack-config\") pod \"openstackclient\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.374024 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fjcg\" (UniqueName: \"kubernetes.io/projected/37a05df3-807b-4349-9ef8-d9767774c6f6-kube-api-access-7fjcg\") pod \"openstackclient\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.374097 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/37a05df3-807b-4349-9ef8-d9767774c6f6-openstack-config-secret\") pod \"openstackclient\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.374169 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37a05df3-807b-4349-9ef8-d9767774c6f6-combined-ca-bundle\") pod \"openstackclient\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.375427 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/37a05df3-807b-4349-9ef8-d9767774c6f6-openstack-config\") pod \"openstackclient\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.380402 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/37a05df3-807b-4349-9ef8-d9767774c6f6-openstack-config-secret\") pod \"openstackclient\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.380892 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37a05df3-807b-4349-9ef8-d9767774c6f6-combined-ca-bundle\") pod \"openstackclient\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.395244 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fjcg\" (UniqueName: \"kubernetes.io/projected/37a05df3-807b-4349-9ef8-d9767774c6f6-kube-api-access-7fjcg\") pod \"openstackclient\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.424375 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.425122 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.441050 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.468270 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 09:09:35 crc kubenswrapper[4932]: E1125 09:09:35.468707 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerName="barbican-api-log" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.468722 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerName="barbican-api-log" Nov 25 09:09:35 crc kubenswrapper[4932]: E1125 09:09:35.468760 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerName="barbican-api" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.468768 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerName="barbican-api" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.468982 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerName="barbican-api" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.469009 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" containerName="barbican-api-log" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.469720 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.475077 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-config-data-custom\") pod \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.475129 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-combined-ca-bundle\") pod \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.475285 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-config-data\") pod \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.475321 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gs9t9\" (UniqueName: \"kubernetes.io/projected/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-kube-api-access-gs9t9\") pod \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.475439 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-logs\") pod \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\" (UID: \"a3dd629e-01fc-495c-926b-b72c5fe0f2f9\") " Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.477011 4932 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.477883 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-logs" (OuterVolumeSpecName: "logs") pod "a3dd629e-01fc-495c-926b-b72c5fe0f2f9" (UID: "a3dd629e-01fc-495c-926b-b72c5fe0f2f9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.482498 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a3dd629e-01fc-495c-926b-b72c5fe0f2f9" (UID: "a3dd629e-01fc-495c-926b-b72c5fe0f2f9"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.484771 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-kube-api-access-gs9t9" (OuterVolumeSpecName: "kube-api-access-gs9t9") pod "a3dd629e-01fc-495c-926b-b72c5fe0f2f9" (UID: "a3dd629e-01fc-495c-926b-b72c5fe0f2f9"). InnerVolumeSpecName "kube-api-access-gs9t9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.516847 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a3dd629e-01fc-495c-926b-b72c5fe0f2f9" (UID: "a3dd629e-01fc-495c-926b-b72c5fe0f2f9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.538919 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-config-data" (OuterVolumeSpecName: "config-data") pod "a3dd629e-01fc-495c-926b-b72c5fe0f2f9" (UID: "a3dd629e-01fc-495c-926b-b72c5fe0f2f9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:35 crc kubenswrapper[4932]: E1125 09:09:35.565981 4932 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 25 09:09:35 crc kubenswrapper[4932]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_37a05df3-807b-4349-9ef8-d9767774c6f6_0(fa2552b8973191267e8a5989259e6f15dea1d6f34aaa475aad2b3cfdd9f761dc): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"fa2552b8973191267e8a5989259e6f15dea1d6f34aaa475aad2b3cfdd9f761dc" Netns:"/var/run/netns/86c5430c-6fd9-4b8f-b470-6c4872faacb0" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=fa2552b8973191267e8a5989259e6f15dea1d6f34aaa475aad2b3cfdd9f761dc;K8S_POD_UID=37a05df3-807b-4349-9ef8-d9767774c6f6" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/37a05df3-807b-4349-9ef8-d9767774c6f6]: expected pod UID "37a05df3-807b-4349-9ef8-d9767774c6f6" but got "28020cd8-f0a6-4aa9-80e6-4aa92b554850" from Kube API Nov 25 09:09:35 crc kubenswrapper[4932]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 25 09:09:35 crc kubenswrapper[4932]: > Nov 25 09:09:35 crc kubenswrapper[4932]: E1125 09:09:35.566229 4932 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 25 09:09:35 crc kubenswrapper[4932]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_37a05df3-807b-4349-9ef8-d9767774c6f6_0(fa2552b8973191267e8a5989259e6f15dea1d6f34aaa475aad2b3cfdd9f761dc): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"fa2552b8973191267e8a5989259e6f15dea1d6f34aaa475aad2b3cfdd9f761dc" Netns:"/var/run/netns/86c5430c-6fd9-4b8f-b470-6c4872faacb0" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=fa2552b8973191267e8a5989259e6f15dea1d6f34aaa475aad2b3cfdd9f761dc;K8S_POD_UID=37a05df3-807b-4349-9ef8-d9767774c6f6" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/37a05df3-807b-4349-9ef8-d9767774c6f6]: expected pod UID "37a05df3-807b-4349-9ef8-d9767774c6f6" but got "28020cd8-f0a6-4aa9-80e6-4aa92b554850" from Kube API Nov 25 09:09:35 crc kubenswrapper[4932]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 25 09:09:35 crc kubenswrapper[4932]: > pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.577092 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/28020cd8-f0a6-4aa9-80e6-4aa92b554850-openstack-config-secret\") pod \"openstackclient\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.577329 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28020cd8-f0a6-4aa9-80e6-4aa92b554850-combined-ca-bundle\") pod \"openstackclient\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.577463 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6b8f\" (UniqueName: \"kubernetes.io/projected/28020cd8-f0a6-4aa9-80e6-4aa92b554850-kube-api-access-c6b8f\") pod \"openstackclient\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.577596 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/28020cd8-f0a6-4aa9-80e6-4aa92b554850-openstack-config\") pod \"openstackclient\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.577789 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.577887 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gs9t9\" (UniqueName: \"kubernetes.io/projected/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-kube-api-access-gs9t9\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.577978 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.578045 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.578112 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3dd629e-01fc-495c-926b-b72c5fe0f2f9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.679674 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/28020cd8-f0a6-4aa9-80e6-4aa92b554850-openstack-config-secret\") pod \"openstackclient\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.679736 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28020cd8-f0a6-4aa9-80e6-4aa92b554850-combined-ca-bundle\") pod \"openstackclient\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " pod="openstack/openstackclient" 
Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.679784 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6b8f\" (UniqueName: \"kubernetes.io/projected/28020cd8-f0a6-4aa9-80e6-4aa92b554850-kube-api-access-c6b8f\") pod \"openstackclient\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.679850 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/28020cd8-f0a6-4aa9-80e6-4aa92b554850-openstack-config\") pod \"openstackclient\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.680868 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/28020cd8-f0a6-4aa9-80e6-4aa92b554850-openstack-config\") pod \"openstackclient\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.684333 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/28020cd8-f0a6-4aa9-80e6-4aa92b554850-openstack-config-secret\") pod \"openstackclient\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.684904 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28020cd8-f0a6-4aa9-80e6-4aa92b554850-combined-ca-bundle\") pod \"openstackclient\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.702111 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6b8f\" (UniqueName: \"kubernetes.io/projected/28020cd8-f0a6-4aa9-80e6-4aa92b554850-kube-api-access-c6b8f\") pod \"openstackclient\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " pod="openstack/openstackclient" Nov 25 09:09:35 crc kubenswrapper[4932]: I1125 09:09:35.810495 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.245019 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.245873 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5dd5649696-p9hnq" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.247303 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5dd5649696-p9hnq" event={"ID":"a3dd629e-01fc-495c-926b-b72c5fe0f2f9","Type":"ContainerDied","Data":"261a9bf12c45043cc00867a433d34a9d527b4484f8b06ea117f40887c3434855"} Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.247388 4932 scope.go:117] "RemoveContainer" containerID="4b5bdc81472d27e076b36331c04dfd6e9c73a73688ac1fe3b237d0b32771a570" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.257495 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="37a05df3-807b-4349-9ef8-d9767774c6f6" podUID="28020cd8-f0a6-4aa9-80e6-4aa92b554850" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.262495 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.277588 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.278523 4932 scope.go:117] "RemoveContainer" containerID="7093803f813bbb4df669ea90b501429a8e528338fb2a6a95b35b3c6bd0926337" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.292621 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5dd5649696-p9hnq"] Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.300996 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5dd5649696-p9hnq"] Nov 25 09:09:36 crc kubenswrapper[4932]: W1125 09:09:36.301832 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28020cd8_f0a6_4aa9_80e6_4aa92b554850.slice/crio-836daad55f6e949c097ffbd328c1d276d8bce3eb113e0c6df992d8826f6a195d WatchSource:0}: Error finding container 836daad55f6e949c097ffbd328c1d276d8bce3eb113e0c6df992d8826f6a195d: Status 404 returned error can't find the container with id 836daad55f6e949c097ffbd328c1d276d8bce3eb113e0c6df992d8826f6a195d Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.394386 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/37a05df3-807b-4349-9ef8-d9767774c6f6-openstack-config-secret\") pod \"37a05df3-807b-4349-9ef8-d9767774c6f6\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.394559 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37a05df3-807b-4349-9ef8-d9767774c6f6-combined-ca-bundle\") pod \"37a05df3-807b-4349-9ef8-d9767774c6f6\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.394644 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/37a05df3-807b-4349-9ef8-d9767774c6f6-openstack-config\") pod \"37a05df3-807b-4349-9ef8-d9767774c6f6\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.394771 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fjcg\" (UniqueName: 
\"kubernetes.io/projected/37a05df3-807b-4349-9ef8-d9767774c6f6-kube-api-access-7fjcg\") pod \"37a05df3-807b-4349-9ef8-d9767774c6f6\" (UID: \"37a05df3-807b-4349-9ef8-d9767774c6f6\") " Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.397618 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37a05df3-807b-4349-9ef8-d9767774c6f6-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "37a05df3-807b-4349-9ef8-d9767774c6f6" (UID: "37a05df3-807b-4349-9ef8-d9767774c6f6"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.409975 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37a05df3-807b-4349-9ef8-d9767774c6f6-kube-api-access-7fjcg" (OuterVolumeSpecName: "kube-api-access-7fjcg") pod "37a05df3-807b-4349-9ef8-d9767774c6f6" (UID: "37a05df3-807b-4349-9ef8-d9767774c6f6"). InnerVolumeSpecName "kube-api-access-7fjcg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.426359 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37a05df3-807b-4349-9ef8-d9767774c6f6-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "37a05df3-807b-4349-9ef8-d9767774c6f6" (UID: "37a05df3-807b-4349-9ef8-d9767774c6f6"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.431565 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37a05df3-807b-4349-9ef8-d9767774c6f6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "37a05df3-807b-4349-9ef8-d9767774c6f6" (UID: "37a05df3-807b-4349-9ef8-d9767774c6f6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.498309 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37a05df3-807b-4349-9ef8-d9767774c6f6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.498347 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/37a05df3-807b-4349-9ef8-d9767774c6f6-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.498359 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fjcg\" (UniqueName: \"kubernetes.io/projected/37a05df3-807b-4349-9ef8-d9767774c6f6-kube-api-access-7fjcg\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.498370 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/37a05df3-807b-4349-9ef8-d9767774c6f6-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.624120 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37a05df3-807b-4349-9ef8-d9767774c6f6" path="/var/lib/kubelet/pods/37a05df3-807b-4349-9ef8-d9767774c6f6/volumes" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.624797 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3dd629e-01fc-495c-926b-b72c5fe0f2f9" path="/var/lib/kubelet/pods/a3dd629e-01fc-495c-926b-b72c5fe0f2f9/volumes" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.713275 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-gqn2r" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.810668 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-config-data\") pod \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.810797 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jpmkk\" (UniqueName: \"kubernetes.io/projected/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-kube-api-access-jpmkk\") pod \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.810838 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-combined-ca-bundle\") pod \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.810886 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-logs\") pod \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.810989 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-scripts\") pod \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\" (UID: \"d4a545d2-ff3c-4a27-b210-4803cdbf3c86\") " Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.812353 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-logs" (OuterVolumeSpecName: "logs") pod "d4a545d2-ff3c-4a27-b210-4803cdbf3c86" (UID: "d4a545d2-ff3c-4a27-b210-4803cdbf3c86"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.815472 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-scripts" (OuterVolumeSpecName: "scripts") pod "d4a545d2-ff3c-4a27-b210-4803cdbf3c86" (UID: "d4a545d2-ff3c-4a27-b210-4803cdbf3c86"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.816175 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-kube-api-access-jpmkk" (OuterVolumeSpecName: "kube-api-access-jpmkk") pod "d4a545d2-ff3c-4a27-b210-4803cdbf3c86" (UID: "d4a545d2-ff3c-4a27-b210-4803cdbf3c86"). InnerVolumeSpecName "kube-api-access-jpmkk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.835655 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d4a545d2-ff3c-4a27-b210-4803cdbf3c86" (UID: "d4a545d2-ff3c-4a27-b210-4803cdbf3c86"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.844911 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-config-data" (OuterVolumeSpecName: "config-data") pod "d4a545d2-ff3c-4a27-b210-4803cdbf3c86" (UID: "d4a545d2-ff3c-4a27-b210-4803cdbf3c86"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.912932 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.912978 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jpmkk\" (UniqueName: \"kubernetes.io/projected/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-kube-api-access-jpmkk\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.912999 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.913015 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:36 crc kubenswrapper[4932]: I1125 09:09:36.913027 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4a545d2-ff3c-4a27-b210-4803cdbf3c86-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.257302 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-gqn2r" event={"ID":"d4a545d2-ff3c-4a27-b210-4803cdbf3c86","Type":"ContainerDied","Data":"ff2553a1d7dcb8a83bdf4f7ff058dc1daaa305d6794ac9410999fd741cc9a681"} Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.257650 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff2553a1d7dcb8a83bdf4f7ff058dc1daaa305d6794ac9410999fd741cc9a681" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.257593 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-gqn2r" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.259391 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"28020cd8-f0a6-4aa9-80e6-4aa92b554850","Type":"ContainerStarted","Data":"836daad55f6e949c097ffbd328c1d276d8bce3eb113e0c6df992d8826f6a195d"} Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.263417 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.270742 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="37a05df3-807b-4349-9ef8-d9767774c6f6" podUID="28020cd8-f0a6-4aa9-80e6-4aa92b554850" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.373353 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5cfb6b64bb-8mrcr"] Nov 25 09:09:37 crc kubenswrapper[4932]: E1125 09:09:37.373785 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4a545d2-ff3c-4a27-b210-4803cdbf3c86" containerName="placement-db-sync" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.373808 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4a545d2-ff3c-4a27-b210-4803cdbf3c86" containerName="placement-db-sync" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.375582 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4a545d2-ff3c-4a27-b210-4803cdbf3c86" containerName="placement-db-sync" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.376531 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.378864 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.379335 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.379441 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.379630 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-m2ssv" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.379484 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.453580 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5cfb6b64bb-8mrcr"] Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.531148 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-public-tls-certs\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.531248 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7h5z9\" (UniqueName: \"kubernetes.io/projected/7a1917d6-4455-4cf5-b932-a38584663b02-kube-api-access-7h5z9\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.531296 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-internal-tls-certs\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc 
kubenswrapper[4932]: I1125 09:09:37.531336 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a1917d6-4455-4cf5-b932-a38584663b02-logs\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.531382 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-scripts\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.531401 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-combined-ca-bundle\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.531416 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-config-data\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.633245 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-internal-tls-certs\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.634458 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a1917d6-4455-4cf5-b932-a38584663b02-logs\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.634539 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-scripts\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.634570 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-combined-ca-bundle\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.634641 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-config-data\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.634711 4932 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-public-tls-certs\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.634774 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7h5z9\" (UniqueName: \"kubernetes.io/projected/7a1917d6-4455-4cf5-b932-a38584663b02-kube-api-access-7h5z9\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.634884 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a1917d6-4455-4cf5-b932-a38584663b02-logs\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.638474 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-internal-tls-certs\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.651806 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-scripts\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.651894 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-config-data\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.652014 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-public-tls-certs\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.664007 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-combined-ca-bundle\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.664697 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7h5z9\" (UniqueName: \"kubernetes.io/projected/7a1917d6-4455-4cf5-b932-a38584663b02-kube-api-access-7h5z9\") pod \"placement-5cfb6b64bb-8mrcr\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") " pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:37 crc kubenswrapper[4932]: I1125 09:09:37.749513 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:38 crc kubenswrapper[4932]: I1125 09:09:38.262821 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5cfb6b64bb-8mrcr"] Nov 25 09:09:38 crc kubenswrapper[4932]: I1125 09:09:38.279938 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5cfb6b64bb-8mrcr" event={"ID":"7a1917d6-4455-4cf5-b932-a38584663b02","Type":"ContainerStarted","Data":"b81c5bb52f33b60fe0a42b50c16ccd2d473cb0b0834a2dd25ffd6852cefa5228"} Nov 25 09:09:38 crc kubenswrapper[4932]: I1125 09:09:38.844010 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:09:38 crc kubenswrapper[4932]: I1125 09:09:38.915349 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf79d967-28wbk"] Nov 25 09:09:38 crc kubenswrapper[4932]: I1125 09:09:38.916082 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cf79d967-28wbk" podUID="db6788e6-9095-4e4f-b7d2-0efe04074361" containerName="dnsmasq-dns" containerID="cri-o://870f7f3b9f9ce5304a5a4f411f73693726174f2d71c3db4819adbc089c91149f" gracePeriod=10 Nov 25 09:09:39 crc kubenswrapper[4932]: I1125 09:09:39.290223 4932 generic.go:334] "Generic (PLEG): container finished" podID="db6788e6-9095-4e4f-b7d2-0efe04074361" containerID="870f7f3b9f9ce5304a5a4f411f73693726174f2d71c3db4819adbc089c91149f" exitCode=0 Nov 25 09:09:39 crc kubenswrapper[4932]: I1125 09:09:39.290281 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf79d967-28wbk" event={"ID":"db6788e6-9095-4e4f-b7d2-0efe04074361","Type":"ContainerDied","Data":"870f7f3b9f9ce5304a5a4f411f73693726174f2d71c3db4819adbc089c91149f"} Nov 25 09:09:39 crc kubenswrapper[4932]: I1125 09:09:39.291490 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5cfb6b64bb-8mrcr" event={"ID":"7a1917d6-4455-4cf5-b932-a38584663b02","Type":"ContainerStarted","Data":"37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073"} Nov 25 09:09:39 crc kubenswrapper[4932]: I1125 09:09:39.291817 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5cfb6b64bb-8mrcr" event={"ID":"7a1917d6-4455-4cf5-b932-a38584663b02","Type":"ContainerStarted","Data":"a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067"} Nov 25 09:09:39 crc kubenswrapper[4932]: I1125 09:09:39.292145 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:39 crc kubenswrapper[4932]: I1125 09:09:39.292181 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:09:39 crc kubenswrapper[4932]: I1125 09:09:39.328497 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5cfb6b64bb-8mrcr" podStartSLOduration=2.328474902 podStartE2EDuration="2.328474902s" podCreationTimestamp="2025-11-25 09:09:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:39.31304886 +0000 UTC m=+1239.439078423" watchObservedRunningTime="2025-11-25 09:09:39.328474902 +0000 UTC m=+1239.454504475" Nov 25 09:09:39 crc kubenswrapper[4932]: I1125 09:09:39.976090 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf79d967-28wbk" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.099608 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-dns-svc\") pod \"db6788e6-9095-4e4f-b7d2-0efe04074361\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.099949 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-config\") pod \"db6788e6-9095-4e4f-b7d2-0efe04074361\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.099986 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-ovsdbserver-nb\") pod \"db6788e6-9095-4e4f-b7d2-0efe04074361\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.100017 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-ovsdbserver-sb\") pod \"db6788e6-9095-4e4f-b7d2-0efe04074361\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.100233 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5fb9\" (UniqueName: \"kubernetes.io/projected/db6788e6-9095-4e4f-b7d2-0efe04074361-kube-api-access-k5fb9\") pod \"db6788e6-9095-4e4f-b7d2-0efe04074361\" (UID: \"db6788e6-9095-4e4f-b7d2-0efe04074361\") " Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.110522 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db6788e6-9095-4e4f-b7d2-0efe04074361-kube-api-access-k5fb9" (OuterVolumeSpecName: "kube-api-access-k5fb9") pod "db6788e6-9095-4e4f-b7d2-0efe04074361" (UID: "db6788e6-9095-4e4f-b7d2-0efe04074361"). InnerVolumeSpecName "kube-api-access-k5fb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.161872 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-config" (OuterVolumeSpecName: "config") pod "db6788e6-9095-4e4f-b7d2-0efe04074361" (UID: "db6788e6-9095-4e4f-b7d2-0efe04074361"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.165076 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "db6788e6-9095-4e4f-b7d2-0efe04074361" (UID: "db6788e6-9095-4e4f-b7d2-0efe04074361"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.171129 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "db6788e6-9095-4e4f-b7d2-0efe04074361" (UID: "db6788e6-9095-4e4f-b7d2-0efe04074361"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.171293 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "db6788e6-9095-4e4f-b7d2-0efe04074361" (UID: "db6788e6-9095-4e4f-b7d2-0efe04074361"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.201745 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.201779 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.201790 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.201799 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db6788e6-9095-4e4f-b7d2-0efe04074361-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.201808 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5fb9\" (UniqueName: \"kubernetes.io/projected/db6788e6-9095-4e4f-b7d2-0efe04074361-kube-api-access-k5fb9\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.319838 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf79d967-28wbk" event={"ID":"db6788e6-9095-4e4f-b7d2-0efe04074361","Type":"ContainerDied","Data":"341e8da74be88ed15f39b3fc000721cfe332115eb8445f01e0e5488afbffe374"} Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.319901 4932 scope.go:117] "RemoveContainer" containerID="870f7f3b9f9ce5304a5a4f411f73693726174f2d71c3db4819adbc089c91149f" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.320084 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf79d967-28wbk" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.359303 4932 scope.go:117] "RemoveContainer" containerID="3b3421dd62e68ac97bc91ed0787da5646e35ef1814ecacbfc3076ea4e847d0a4" Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.367170 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf79d967-28wbk"] Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.381597 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf79d967-28wbk"] Nov 25 09:09:40 crc kubenswrapper[4932]: I1125 09:09:40.621994 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db6788e6-9095-4e4f-b7d2-0efe04074361" path="/var/lib/kubelet/pods/db6788e6-9095-4e4f-b7d2-0efe04074361/volumes" Nov 25 09:09:43 crc kubenswrapper[4932]: I1125 09:09:43.385720 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:09:43 crc kubenswrapper[4932]: I1125 09:09:43.386659 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="ceilometer-central-agent" containerID="cri-o://354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55" gracePeriod=30 Nov 25 09:09:43 crc kubenswrapper[4932]: I1125 09:09:43.386825 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="proxy-httpd" containerID="cri-o://32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539" gracePeriod=30 Nov 25 09:09:43 crc kubenswrapper[4932]: I1125 09:09:43.386878 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="sg-core" containerID="cri-o://6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642" gracePeriod=30 Nov 25 09:09:43 crc kubenswrapper[4932]: I1125 09:09:43.386909 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="ceilometer-notification-agent" containerID="cri-o://fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65" gracePeriod=30 Nov 25 09:09:43 crc kubenswrapper[4932]: I1125 09:09:43.394053 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.157:3000/\": EOF" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.102626 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-7f5484589f-8gmzk"] Nov 25 09:09:44 crc kubenswrapper[4932]: E1125 09:09:44.104664 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db6788e6-9095-4e4f-b7d2-0efe04074361" containerName="init" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.104708 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="db6788e6-9095-4e4f-b7d2-0efe04074361" containerName="init" Nov 25 09:09:44 crc kubenswrapper[4932]: E1125 09:09:44.104735 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db6788e6-9095-4e4f-b7d2-0efe04074361" containerName="dnsmasq-dns" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.104745 4932 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="db6788e6-9095-4e4f-b7d2-0efe04074361" containerName="dnsmasq-dns" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.104956 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="db6788e6-9095-4e4f-b7d2-0efe04074361" containerName="dnsmasq-dns" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.106331 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.108396 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.108801 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.108840 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.118357 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7f5484589f-8gmzk"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.150671 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-q55gm"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.153838 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-q55gm" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.162870 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-q55gm"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.175710 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-public-tls-certs\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.175787 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-run-httpd\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.175830 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-config-data\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.176017 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-combined-ca-bundle\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.176762 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrbvd\" (UniqueName: \"kubernetes.io/projected/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-kube-api-access-xrbvd\") pod 
\"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.176808 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-log-httpd\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.176832 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-etc-swift\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.176865 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-internal-tls-certs\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.176963 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndjhp\" (UniqueName: \"kubernetes.io/projected/339c14cf-c65c-41e7-a983-d16b36bf01ea-kube-api-access-ndjhp\") pod \"nova-api-db-create-q55gm\" (UID: \"339c14cf-c65c-41e7-a983-d16b36bf01ea\") " pod="openstack/nova-api-db-create-q55gm" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.176994 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/339c14cf-c65c-41e7-a983-d16b36bf01ea-operator-scripts\") pod \"nova-api-db-create-q55gm\" (UID: \"339c14cf-c65c-41e7-a983-d16b36bf01ea\") " pod="openstack/nova-api-db-create-q55gm" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.246230 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-5mlqd"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.263532 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-5mlqd" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.265230 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-5mlqd"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.279435 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrbvd\" (UniqueName: \"kubernetes.io/projected/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-kube-api-access-xrbvd\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.279495 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-log-httpd\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.279527 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-etc-swift\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.279565 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-internal-tls-certs\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.279700 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndjhp\" (UniqueName: \"kubernetes.io/projected/339c14cf-c65c-41e7-a983-d16b36bf01ea-kube-api-access-ndjhp\") pod \"nova-api-db-create-q55gm\" (UID: \"339c14cf-c65c-41e7-a983-d16b36bf01ea\") " pod="openstack/nova-api-db-create-q55gm" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.279769 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/339c14cf-c65c-41e7-a983-d16b36bf01ea-operator-scripts\") pod \"nova-api-db-create-q55gm\" (UID: \"339c14cf-c65c-41e7-a983-d16b36bf01ea\") " pod="openstack/nova-api-db-create-q55gm" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.279994 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-public-tls-certs\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.280067 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-log-httpd\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.280104 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-run-httpd\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.280136 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwx72\" (UniqueName: \"kubernetes.io/projected/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1-kube-api-access-rwx72\") pod \"nova-cell0-db-create-5mlqd\" (UID: \"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1\") " pod="openstack/nova-cell0-db-create-5mlqd" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.280312 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-config-data\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.280391 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1-operator-scripts\") pod \"nova-cell0-db-create-5mlqd\" (UID: \"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1\") " pod="openstack/nova-cell0-db-create-5mlqd" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.280485 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-combined-ca-bundle\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.281012 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-run-httpd\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.297610 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-public-tls-certs\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.300262 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-combined-ca-bundle\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.300732 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/339c14cf-c65c-41e7-a983-d16b36bf01ea-operator-scripts\") pod \"nova-api-db-create-q55gm\" (UID: \"339c14cf-c65c-41e7-a983-d16b36bf01ea\") " pod="openstack/nova-api-db-create-q55gm" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.312228 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-config-data\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.313060 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrbvd\" (UniqueName: \"kubernetes.io/projected/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-kube-api-access-xrbvd\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.313872 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-internal-tls-certs\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.314448 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndjhp\" (UniqueName: \"kubernetes.io/projected/339c14cf-c65c-41e7-a983-d16b36bf01ea-kube-api-access-ndjhp\") pod \"nova-api-db-create-q55gm\" (UID: \"339c14cf-c65c-41e7-a983-d16b36bf01ea\") " pod="openstack/nova-api-db-create-q55gm" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.322685 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-etc-swift\") pod \"swift-proxy-7f5484589f-8gmzk\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.351323 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-7e0a-account-create-4tk2l"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.352519 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7e0a-account-create-4tk2l" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.355562 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.367416 4932 generic.go:334] "Generic (PLEG): container finished" podID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerID="32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539" exitCode=0 Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.367459 4932 generic.go:334] "Generic (PLEG): container finished" podID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerID="6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642" exitCode=2 Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.367469 4932 generic.go:334] "Generic (PLEG): container finished" podID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerID="354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55" exitCode=0 Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.367486 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"712f11c0-89ae-4e39-b6c6-3bb7303ccaae","Type":"ContainerDied","Data":"32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539"} Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.367567 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"712f11c0-89ae-4e39-b6c6-3bb7303ccaae","Type":"ContainerDied","Data":"6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642"} Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.367584 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"712f11c0-89ae-4e39-b6c6-3bb7303ccaae","Type":"ContainerDied","Data":"354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55"} Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.370380 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7e0a-account-create-4tk2l"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.382465 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/afee3e89-74a3-4ad0-ac06-c5c97efa8543-operator-scripts\") pod \"nova-api-7e0a-account-create-4tk2l\" (UID: \"afee3e89-74a3-4ad0-ac06-c5c97efa8543\") " pod="openstack/nova-api-7e0a-account-create-4tk2l" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.382620 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwx72\" (UniqueName: \"kubernetes.io/projected/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1-kube-api-access-rwx72\") pod \"nova-cell0-db-create-5mlqd\" (UID: \"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1\") " pod="openstack/nova-cell0-db-create-5mlqd" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.382648 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xmtx\" (UniqueName: \"kubernetes.io/projected/afee3e89-74a3-4ad0-ac06-c5c97efa8543-kube-api-access-4xmtx\") pod \"nova-api-7e0a-account-create-4tk2l\" (UID: \"afee3e89-74a3-4ad0-ac06-c5c97efa8543\") " pod="openstack/nova-api-7e0a-account-create-4tk2l" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.382687 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1-operator-scripts\") pod \"nova-cell0-db-create-5mlqd\" (UID: \"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1\") " pod="openstack/nova-cell0-db-create-5mlqd" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.383542 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1-operator-scripts\") pod \"nova-cell0-db-create-5mlqd\" (UID: \"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1\") " pod="openstack/nova-cell0-db-create-5mlqd" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.405149 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwx72\" (UniqueName: \"kubernetes.io/projected/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1-kube-api-access-rwx72\") pod \"nova-cell0-db-create-5mlqd\" (UID: \"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1\") " pod="openstack/nova-cell0-db-create-5mlqd" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.425656 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-q67zl"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.426124 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.426931 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-q67zl" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.447128 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-q67zl"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.475018 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-q55gm" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.485987 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xmtx\" (UniqueName: \"kubernetes.io/projected/afee3e89-74a3-4ad0-ac06-c5c97efa8543-kube-api-access-4xmtx\") pod \"nova-api-7e0a-account-create-4tk2l\" (UID: \"afee3e89-74a3-4ad0-ac06-c5c97efa8543\") " pod="openstack/nova-api-7e0a-account-create-4tk2l" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.486073 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/afee3e89-74a3-4ad0-ac06-c5c97efa8543-operator-scripts\") pod \"nova-api-7e0a-account-create-4tk2l\" (UID: \"afee3e89-74a3-4ad0-ac06-c5c97efa8543\") " pod="openstack/nova-api-7e0a-account-create-4tk2l" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.486111 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046-operator-scripts\") pod \"nova-cell1-db-create-q67zl\" (UID: \"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046\") " pod="openstack/nova-cell1-db-create-q67zl" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.486130 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzmpk\" (UniqueName: \"kubernetes.io/projected/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046-kube-api-access-qzmpk\") pod \"nova-cell1-db-create-q67zl\" (UID: \"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046\") " pod="openstack/nova-cell1-db-create-q67zl" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.487100 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/afee3e89-74a3-4ad0-ac06-c5c97efa8543-operator-scripts\") pod \"nova-api-7e0a-account-create-4tk2l\" (UID: \"afee3e89-74a3-4ad0-ac06-c5c97efa8543\") " pod="openstack/nova-api-7e0a-account-create-4tk2l" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.508268 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xmtx\" (UniqueName: \"kubernetes.io/projected/afee3e89-74a3-4ad0-ac06-c5c97efa8543-kube-api-access-4xmtx\") pod \"nova-api-7e0a-account-create-4tk2l\" (UID: \"afee3e89-74a3-4ad0-ac06-c5c97efa8543\") " pod="openstack/nova-api-7e0a-account-create-4tk2l" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.532855 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-a937-account-create-d2hjq"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.533934 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-a937-account-create-d2hjq" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.536814 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.558240 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a937-account-create-d2hjq"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.587799 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046-operator-scripts\") pod \"nova-cell1-db-create-q67zl\" (UID: \"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046\") " pod="openstack/nova-cell1-db-create-q67zl" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.587847 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzmpk\" (UniqueName: \"kubernetes.io/projected/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046-kube-api-access-qzmpk\") pod \"nova-cell1-db-create-q67zl\" (UID: \"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046\") " pod="openstack/nova-cell1-db-create-q67zl" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.590161 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046-operator-scripts\") pod \"nova-cell1-db-create-q67zl\" (UID: \"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046\") " pod="openstack/nova-cell1-db-create-q67zl" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.598967 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-5mlqd" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.605783 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzmpk\" (UniqueName: \"kubernetes.io/projected/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046-kube-api-access-qzmpk\") pod \"nova-cell1-db-create-q67zl\" (UID: \"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046\") " pod="openstack/nova-cell1-db-create-q67zl" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.691016 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66dzt\" (UniqueName: \"kubernetes.io/projected/fc642fd3-edd5-4598-9eeb-06bdb9748b1a-kube-api-access-66dzt\") pod \"nova-cell0-a937-account-create-d2hjq\" (UID: \"fc642fd3-edd5-4598-9eeb-06bdb9748b1a\") " pod="openstack/nova-cell0-a937-account-create-d2hjq" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.691202 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc642fd3-edd5-4598-9eeb-06bdb9748b1a-operator-scripts\") pod \"nova-cell0-a937-account-create-d2hjq\" (UID: \"fc642fd3-edd5-4598-9eeb-06bdb9748b1a\") " pod="openstack/nova-cell0-a937-account-create-d2hjq" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.725068 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-bc42-account-create-ppnl9"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.726391 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-bc42-account-create-ppnl9" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.728634 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.736707 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-bc42-account-create-ppnl9"] Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.753123 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7e0a-account-create-4tk2l" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.760450 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-q67zl" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.794364 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66dzt\" (UniqueName: \"kubernetes.io/projected/fc642fd3-edd5-4598-9eeb-06bdb9748b1a-kube-api-access-66dzt\") pod \"nova-cell0-a937-account-create-d2hjq\" (UID: \"fc642fd3-edd5-4598-9eeb-06bdb9748b1a\") " pod="openstack/nova-cell0-a937-account-create-d2hjq" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.794423 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dglhv\" (UniqueName: \"kubernetes.io/projected/cbf19371-546d-4971-a555-443c36b129be-kube-api-access-dglhv\") pod \"nova-cell1-bc42-account-create-ppnl9\" (UID: \"cbf19371-546d-4971-a555-443c36b129be\") " pod="openstack/nova-cell1-bc42-account-create-ppnl9" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.794523 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc642fd3-edd5-4598-9eeb-06bdb9748b1a-operator-scripts\") pod \"nova-cell0-a937-account-create-d2hjq\" (UID: \"fc642fd3-edd5-4598-9eeb-06bdb9748b1a\") " pod="openstack/nova-cell0-a937-account-create-d2hjq" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.795323 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbf19371-546d-4971-a555-443c36b129be-operator-scripts\") pod \"nova-cell1-bc42-account-create-ppnl9\" (UID: \"cbf19371-546d-4971-a555-443c36b129be\") " pod="openstack/nova-cell1-bc42-account-create-ppnl9" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.796371 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc642fd3-edd5-4598-9eeb-06bdb9748b1a-operator-scripts\") pod \"nova-cell0-a937-account-create-d2hjq\" (UID: \"fc642fd3-edd5-4598-9eeb-06bdb9748b1a\") " pod="openstack/nova-cell0-a937-account-create-d2hjq" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.813205 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66dzt\" (UniqueName: \"kubernetes.io/projected/fc642fd3-edd5-4598-9eeb-06bdb9748b1a-kube-api-access-66dzt\") pod \"nova-cell0-a937-account-create-d2hjq\" (UID: \"fc642fd3-edd5-4598-9eeb-06bdb9748b1a\") " pod="openstack/nova-cell0-a937-account-create-d2hjq" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.865775 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-a937-account-create-d2hjq" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.896778 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dglhv\" (UniqueName: \"kubernetes.io/projected/cbf19371-546d-4971-a555-443c36b129be-kube-api-access-dglhv\") pod \"nova-cell1-bc42-account-create-ppnl9\" (UID: \"cbf19371-546d-4971-a555-443c36b129be\") " pod="openstack/nova-cell1-bc42-account-create-ppnl9" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.896888 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbf19371-546d-4971-a555-443c36b129be-operator-scripts\") pod \"nova-cell1-bc42-account-create-ppnl9\" (UID: \"cbf19371-546d-4971-a555-443c36b129be\") " pod="openstack/nova-cell1-bc42-account-create-ppnl9" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.897861 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbf19371-546d-4971-a555-443c36b129be-operator-scripts\") pod \"nova-cell1-bc42-account-create-ppnl9\" (UID: \"cbf19371-546d-4971-a555-443c36b129be\") " pod="openstack/nova-cell1-bc42-account-create-ppnl9" Nov 25 09:09:44 crc kubenswrapper[4932]: I1125 09:09:44.913697 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dglhv\" (UniqueName: \"kubernetes.io/projected/cbf19371-546d-4971-a555-443c36b129be-kube-api-access-dglhv\") pod \"nova-cell1-bc42-account-create-ppnl9\" (UID: \"cbf19371-546d-4971-a555-443c36b129be\") " pod="openstack/nova-cell1-bc42-account-create-ppnl9" Nov 25 09:09:45 crc kubenswrapper[4932]: I1125 09:09:45.051006 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-bc42-account-create-ppnl9" Nov 25 09:09:46 crc kubenswrapper[4932]: I1125 09:09:46.994636 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-a937-account-create-d2hjq"] Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.006156 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-bc42-account-create-ppnl9"] Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.106626 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-q55gm"] Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.118974 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-q67zl"] Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.127268 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7e0a-account-create-4tk2l"] Nov 25 09:09:47 crc kubenswrapper[4932]: W1125 09:09:47.145491 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod339c14cf_c65c_41e7_a983_d16b36bf01ea.slice/crio-f735a31dd042cfe5cb1b07358434fc602ddaa96c7c78d6a6845c3305673e1d2e WatchSource:0}: Error finding container f735a31dd042cfe5cb1b07358434fc602ddaa96c7c78d6a6845c3305673e1d2e: Status 404 returned error can't find the container with id f735a31dd042cfe5cb1b07358434fc602ddaa96c7c78d6a6845c3305673e1d2e Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.169004 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-5mlqd"] Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.315132 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7f5484589f-8gmzk"] Nov 25 09:09:47 crc kubenswrapper[4932]: W1125 09:09:47.317585 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e365f51_6fe5_47b3_b183_5cf5cae5c65e.slice/crio-8e665194bc97ba844124369d64739611e89e9cf21dbcf6b1a53684060eef9389 WatchSource:0}: Error finding container 8e665194bc97ba844124369d64739611e89e9cf21dbcf6b1a53684060eef9389: Status 404 returned error can't find the container with id 8e665194bc97ba844124369d64739611e89e9cf21dbcf6b1a53684060eef9389 Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.399387 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f5484589f-8gmzk" event={"ID":"9e365f51-6fe5-47b3-b183-5cf5cae5c65e","Type":"ContainerStarted","Data":"8e665194bc97ba844124369d64739611e89e9cf21dbcf6b1a53684060eef9389"} Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.401083 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a937-account-create-d2hjq" event={"ID":"fc642fd3-edd5-4598-9eeb-06bdb9748b1a","Type":"ContainerStarted","Data":"840c4444509d964f704eb4169695c50b63d3050f96c65c8c60c1559a9d6368da"} Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.402317 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-5mlqd" event={"ID":"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1","Type":"ContainerStarted","Data":"27191d8b21ec33c2378bccda1b230b32918d9fb20ee7152b95fbbe41d095e92e"} Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.403399 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-bc42-account-create-ppnl9" 
event={"ID":"cbf19371-546d-4971-a555-443c36b129be","Type":"ContainerStarted","Data":"28885e5ae0234685a8c0bded02ec874704c339140a102849743703a6f0ff5e44"} Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.404403 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7e0a-account-create-4tk2l" event={"ID":"afee3e89-74a3-4ad0-ac06-c5c97efa8543","Type":"ContainerStarted","Data":"ab80d9c5c2501edec512c019e6c97c4c07ba8c121d562ca1fdc692673649723c"} Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.406352 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-q55gm" event={"ID":"339c14cf-c65c-41e7-a983-d16b36bf01ea","Type":"ContainerStarted","Data":"f735a31dd042cfe5cb1b07358434fc602ddaa96c7c78d6a6845c3305673e1d2e"} Nov 25 09:09:47 crc kubenswrapper[4932]: I1125 09:09:47.407511 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-q67zl" event={"ID":"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046","Type":"ContainerStarted","Data":"c1b26e3ded5787c92264ece3438207d9b04466d5c0e1105d5651f2617205f6b9"} Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.266132 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.418420 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-q55gm" event={"ID":"339c14cf-c65c-41e7-a983-d16b36bf01ea","Type":"ContainerStarted","Data":"75148f61ebc41098dc7bed57c08e55401d84cfe49c16eea03690a213f89fa9e6"} Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.420634 4932 generic.go:334] "Generic (PLEG): container finished" podID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerID="fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65" exitCode=0 Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.420683 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"712f11c0-89ae-4e39-b6c6-3bb7303ccaae","Type":"ContainerDied","Data":"fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65"} Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.420700 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"712f11c0-89ae-4e39-b6c6-3bb7303ccaae","Type":"ContainerDied","Data":"d810a46699183fdd7a84d5f6d4758de112ef4594ada74290219dfd44e3cc4c26"} Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.420716 4932 scope.go:117] "RemoveContainer" containerID="32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.420719 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.427435 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f5484589f-8gmzk" event={"ID":"9e365f51-6fe5-47b3-b183-5cf5cae5c65e","Type":"ContainerStarted","Data":"bb6122d938bb9d23fba3db816a5bc8cfafc993ae9ba9ebacdda64a4c57056966"} Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.427483 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f5484589f-8gmzk" event={"ID":"9e365f51-6fe5-47b3-b183-5cf5cae5c65e","Type":"ContainerStarted","Data":"53bcbc203394b3c852ba1c6182bc8eaf5e1970de1e3b7f900c6947cac59286d4"} Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.427533 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.427629 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.433419 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"28020cd8-f0a6-4aa9-80e6-4aa92b554850","Type":"ContainerStarted","Data":"96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544"} Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.436737 4932 generic.go:334] "Generic (PLEG): container finished" podID="cbf19371-546d-4971-a555-443c36b129be" containerID="adf6076cc75094913a8f42ea8fcd7116b3db72502f1ce0681414098453d4e7ce" exitCode=0 Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.436809 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-bc42-account-create-ppnl9" event={"ID":"cbf19371-546d-4971-a555-443c36b129be","Type":"ContainerDied","Data":"adf6076cc75094913a8f42ea8fcd7116b3db72502f1ce0681414098453d4e7ce"} Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.437888 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-q55gm" podStartSLOduration=4.437868922 podStartE2EDuration="4.437868922s" podCreationTimestamp="2025-11-25 09:09:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:48.433539815 +0000 UTC m=+1248.559569388" watchObservedRunningTime="2025-11-25 09:09:48.437868922 +0000 UTC m=+1248.563898495" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.438559 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7e0a-account-create-4tk2l" event={"ID":"afee3e89-74a3-4ad0-ac06-c5c97efa8543","Type":"ContainerStarted","Data":"8861de718b935ea10a4da1b0bd44557dea4ff94e37f66b0d78a10585c74a92f6"} Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.440002 4932 generic.go:334] "Generic (PLEG): container finished" podID="6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046" containerID="f728aa03da6869a351a6020593c1a1aa2ec915e5a09d978c9e647881d5686d77" exitCode=0 Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.440056 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-q67zl" event={"ID":"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046","Type":"ContainerDied","Data":"f728aa03da6869a351a6020593c1a1aa2ec915e5a09d978c9e647881d5686d77"} Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.441784 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a937-account-create-d2hjq" 
event={"ID":"fc642fd3-edd5-4598-9eeb-06bdb9748b1a","Type":"ContainerStarted","Data":"3cc44a4bb5bf8607df01e273c8abc8040ee6601acb99979bb6dd8efe2be567aa"} Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.443801 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-5mlqd" event={"ID":"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1","Type":"ContainerStarted","Data":"f66e9f2ad343c3b0f6e212719d6e385729f097478aabe9d1af995fb102ee097e"} Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.452547 4932 scope.go:117] "RemoveContainer" containerID="6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.465836 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-log-httpd\") pod \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.465884 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k882j\" (UniqueName: \"kubernetes.io/projected/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-kube-api-access-k882j\") pod \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.465926 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-sg-core-conf-yaml\") pod \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.466005 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-scripts\") pod \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.466044 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-combined-ca-bundle\") pod \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.466103 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-run-httpd\") pod \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.466135 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-config-data\") pod \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\" (UID: \"712f11c0-89ae-4e39-b6c6-3bb7303ccaae\") " Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.466481 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "712f11c0-89ae-4e39-b6c6-3bb7303ccaae" (UID: "712f11c0-89ae-4e39-b6c6-3bb7303ccaae"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.467784 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.468043 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "712f11c0-89ae-4e39-b6c6-3bb7303ccaae" (UID: "712f11c0-89ae-4e39-b6c6-3bb7303ccaae"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.472167 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-kube-api-access-k882j" (OuterVolumeSpecName: "kube-api-access-k882j") pod "712f11c0-89ae-4e39-b6c6-3bb7303ccaae" (UID: "712f11c0-89ae-4e39-b6c6-3bb7303ccaae"). InnerVolumeSpecName "kube-api-access-k882j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.472858 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-scripts" (OuterVolumeSpecName: "scripts") pod "712f11c0-89ae-4e39-b6c6-3bb7303ccaae" (UID: "712f11c0-89ae-4e39-b6c6-3bb7303ccaae"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.482697 4932 scope.go:117] "RemoveContainer" containerID="fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.505280 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-7f5484589f-8gmzk" podStartSLOduration=4.505166716 podStartE2EDuration="4.505166716s" podCreationTimestamp="2025-11-25 09:09:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:48.465238685 +0000 UTC m=+1248.591268268" watchObservedRunningTime="2025-11-25 09:09:48.505166716 +0000 UTC m=+1248.631196299" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.506654 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.812805469 podStartE2EDuration="13.506643609s" podCreationTimestamp="2025-11-25 09:09:35 +0000 UTC" firstStartedPulling="2025-11-25 09:09:36.30453828 +0000 UTC m=+1236.430567843" lastFinishedPulling="2025-11-25 09:09:47.99837642 +0000 UTC m=+1248.124405983" observedRunningTime="2025-11-25 09:09:48.493309268 +0000 UTC m=+1248.619338861" watchObservedRunningTime="2025-11-25 09:09:48.506643609 +0000 UTC m=+1248.632673162" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.522014 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "712f11c0-89ae-4e39-b6c6-3bb7303ccaae" (UID: "712f11c0-89ae-4e39-b6c6-3bb7303ccaae"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.522696 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-a937-account-create-d2hjq" podStartSLOduration=4.5226816 podStartE2EDuration="4.5226816s" podCreationTimestamp="2025-11-25 09:09:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:48.515354685 +0000 UTC m=+1248.641384238" watchObservedRunningTime="2025-11-25 09:09:48.5226816 +0000 UTC m=+1248.648711153" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.549066 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-5mlqd" podStartSLOduration=4.549047473 podStartE2EDuration="4.549047473s" podCreationTimestamp="2025-11-25 09:09:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:48.545922842 +0000 UTC m=+1248.671952405" watchObservedRunningTime="2025-11-25 09:09:48.549047473 +0000 UTC m=+1248.675077036" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.567391 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "712f11c0-89ae-4e39-b6c6-3bb7303ccaae" (UID: "712f11c0-89ae-4e39-b6c6-3bb7303ccaae"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.570813 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k882j\" (UniqueName: \"kubernetes.io/projected/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-kube-api-access-k882j\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.570846 4932 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.570860 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.570873 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.570884 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.590303 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-7e0a-account-create-4tk2l" podStartSLOduration=4.590279993 podStartE2EDuration="4.590279993s" podCreationTimestamp="2025-11-25 09:09:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:09:48.578638031 +0000 UTC m=+1248.704667594" watchObservedRunningTime="2025-11-25 09:09:48.590279993 +0000 UTC m=+1248.716309586" 
Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.638468 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-config-data" (OuterVolumeSpecName: "config-data") pod "712f11c0-89ae-4e39-b6c6-3bb7303ccaae" (UID: "712f11c0-89ae-4e39-b6c6-3bb7303ccaae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.664030 4932 scope.go:117] "RemoveContainer" containerID="354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.672113 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/712f11c0-89ae-4e39-b6c6-3bb7303ccaae-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.691997 4932 scope.go:117] "RemoveContainer" containerID="32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539" Nov 25 09:09:48 crc kubenswrapper[4932]: E1125 09:09:48.692431 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539\": container with ID starting with 32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539 not found: ID does not exist" containerID="32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.692477 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539"} err="failed to get container status \"32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539\": rpc error: code = NotFound desc = could not find container \"32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539\": container with ID starting with 32c008a5e7e8f7cd1c5787217c0efaba1ca48cf4067c7f56d73199aac1c41539 not found: ID does not exist" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.692504 4932 scope.go:117] "RemoveContainer" containerID="6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642" Nov 25 09:09:48 crc kubenswrapper[4932]: E1125 09:09:48.692743 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642\": container with ID starting with 6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642 not found: ID does not exist" containerID="6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.692764 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642"} err="failed to get container status \"6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642\": rpc error: code = NotFound desc = could not find container \"6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642\": container with ID starting with 6fed213e7e0b6a91f9095eb877b71637f39cdd3e6e17fb5685afcff86d52b642 not found: ID does not exist" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.692778 4932 scope.go:117] "RemoveContainer" containerID="fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65" Nov 25 09:09:48 crc 
kubenswrapper[4932]: E1125 09:09:48.693696 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65\": container with ID starting with fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65 not found: ID does not exist" containerID="fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.693720 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65"} err="failed to get container status \"fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65\": rpc error: code = NotFound desc = could not find container \"fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65\": container with ID starting with fe1b51aabe8e8e134bdde964b89758166518c8b518a155dd286d600851aa4b65 not found: ID does not exist" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.693735 4932 scope.go:117] "RemoveContainer" containerID="354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55" Nov 25 09:09:48 crc kubenswrapper[4932]: E1125 09:09:48.694288 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55\": container with ID starting with 354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55 not found: ID does not exist" containerID="354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.694342 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55"} err="failed to get container status \"354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55\": rpc error: code = NotFound desc = could not find container \"354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55\": container with ID starting with 354ff517a9abb09eac721996ad7a950a5c5b84037e3ddb368d467c3609a7bd55 not found: ID does not exist" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.786154 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.794063 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.814454 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:09:48 crc kubenswrapper[4932]: E1125 09:09:48.814999 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="proxy-httpd" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.815016 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="proxy-httpd" Nov 25 09:09:48 crc kubenswrapper[4932]: E1125 09:09:48.815036 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="ceilometer-central-agent" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.815044 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="ceilometer-central-agent" Nov 25 09:09:48 crc 
kubenswrapper[4932]: E1125 09:09:48.815072 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="sg-core" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.815079 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="sg-core" Nov 25 09:09:48 crc kubenswrapper[4932]: E1125 09:09:48.815095 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="ceilometer-notification-agent" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.815103 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="ceilometer-notification-agent" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.815343 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="ceilometer-central-agent" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.815362 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="ceilometer-notification-agent" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.815372 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="proxy-httpd" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.815391 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" containerName="sg-core" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.818118 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.822751 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.823286 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.823976 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.983415 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-scripts\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.983532 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.983703 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7301e0af-07da-4591-b6c7-62b14c109823-run-httpd\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.983778 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-config-data\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.983825 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8hzt\" (UniqueName: \"kubernetes.io/projected/7301e0af-07da-4591-b6c7-62b14c109823-kube-api-access-k8hzt\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.983934 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:48 crc kubenswrapper[4932]: I1125 09:09:48.983974 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7301e0af-07da-4591-b6c7-62b14c109823-log-httpd\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.085879 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.085926 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7301e0af-07da-4591-b6c7-62b14c109823-log-httpd\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.085968 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-scripts\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.086029 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.086079 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7301e0af-07da-4591-b6c7-62b14c109823-run-httpd\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.086106 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-config-data\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.086126 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-k8hzt\" (UniqueName: \"kubernetes.io/projected/7301e0af-07da-4591-b6c7-62b14c109823-kube-api-access-k8hzt\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.086835 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7301e0af-07da-4591-b6c7-62b14c109823-log-httpd\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.087065 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7301e0af-07da-4591-b6c7-62b14c109823-run-httpd\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.090109 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.091527 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-config-data\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.091956 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.103809 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-scripts\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.108409 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8hzt\" (UniqueName: \"kubernetes.io/projected/7301e0af-07da-4591-b6c7-62b14c109823-kube-api-access-k8hzt\") pod \"ceilometer-0\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.143493 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.453775 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc642fd3-edd5-4598-9eeb-06bdb9748b1a" containerID="3cc44a4bb5bf8607df01e273c8abc8040ee6601acb99979bb6dd8efe2be567aa" exitCode=0 Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.453871 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a937-account-create-d2hjq" event={"ID":"fc642fd3-edd5-4598-9eeb-06bdb9748b1a","Type":"ContainerDied","Data":"3cc44a4bb5bf8607df01e273c8abc8040ee6601acb99979bb6dd8efe2be567aa"} Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.456224 4932 generic.go:334] "Generic (PLEG): container finished" podID="b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1" containerID="f66e9f2ad343c3b0f6e212719d6e385729f097478aabe9d1af995fb102ee097e" exitCode=0 Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.456975 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-5mlqd" event={"ID":"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1","Type":"ContainerDied","Data":"f66e9f2ad343c3b0f6e212719d6e385729f097478aabe9d1af995fb102ee097e"} Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.458044 4932 generic.go:334] "Generic (PLEG): container finished" podID="afee3e89-74a3-4ad0-ac06-c5c97efa8543" containerID="8861de718b935ea10a4da1b0bd44557dea4ff94e37f66b0d78a10585c74a92f6" exitCode=0 Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.458105 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7e0a-account-create-4tk2l" event={"ID":"afee3e89-74a3-4ad0-ac06-c5c97efa8543","Type":"ContainerDied","Data":"8861de718b935ea10a4da1b0bd44557dea4ff94e37f66b0d78a10585c74a92f6"} Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.459864 4932 generic.go:334] "Generic (PLEG): container finished" podID="339c14cf-c65c-41e7-a983-d16b36bf01ea" containerID="75148f61ebc41098dc7bed57c08e55401d84cfe49c16eea03690a213f89fa9e6" exitCode=0 Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.459937 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-q55gm" event={"ID":"339c14cf-c65c-41e7-a983-d16b36bf01ea","Type":"ContainerDied","Data":"75148f61ebc41098dc7bed57c08e55401d84cfe49c16eea03690a213f89fa9e6"} Nov 25 09:09:49 crc kubenswrapper[4932]: I1125 09:09:49.624125 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.026812 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-q67zl" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.034743 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-bc42-account-create-ppnl9" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.104432 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzmpk\" (UniqueName: \"kubernetes.io/projected/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046-kube-api-access-qzmpk\") pod \"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046\" (UID: \"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046\") " Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.104517 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046-operator-scripts\") pod \"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046\" (UID: \"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046\") " Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.105371 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046" (UID: "6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.111135 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046-kube-api-access-qzmpk" (OuterVolumeSpecName: "kube-api-access-qzmpk") pod "6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046" (UID: "6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046"). InnerVolumeSpecName "kube-api-access-qzmpk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.206520 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbf19371-546d-4971-a555-443c36b129be-operator-scripts\") pod \"cbf19371-546d-4971-a555-443c36b129be\" (UID: \"cbf19371-546d-4971-a555-443c36b129be\") " Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.206586 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dglhv\" (UniqueName: \"kubernetes.io/projected/cbf19371-546d-4971-a555-443c36b129be-kube-api-access-dglhv\") pod \"cbf19371-546d-4971-a555-443c36b129be\" (UID: \"cbf19371-546d-4971-a555-443c36b129be\") " Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.207161 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzmpk\" (UniqueName: \"kubernetes.io/projected/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046-kube-api-access-qzmpk\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.207197 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.207224 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbf19371-546d-4971-a555-443c36b129be-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cbf19371-546d-4971-a555-443c36b129be" (UID: "cbf19371-546d-4971-a555-443c36b129be"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.210661 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbf19371-546d-4971-a555-443c36b129be-kube-api-access-dglhv" (OuterVolumeSpecName: "kube-api-access-dglhv") pod "cbf19371-546d-4971-a555-443c36b129be" (UID: "cbf19371-546d-4971-a555-443c36b129be"). InnerVolumeSpecName "kube-api-access-dglhv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.309070 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dglhv\" (UniqueName: \"kubernetes.io/projected/cbf19371-546d-4971-a555-443c36b129be-kube-api-access-dglhv\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.309112 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cbf19371-546d-4971-a555-443c36b129be-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.472634 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-q67zl" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.472582 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-q67zl" event={"ID":"6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046","Type":"ContainerDied","Data":"c1b26e3ded5787c92264ece3438207d9b04466d5c0e1105d5651f2617205f6b9"} Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.472872 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c1b26e3ded5787c92264ece3438207d9b04466d5c0e1105d5651f2617205f6b9" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.473759 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7301e0af-07da-4591-b6c7-62b14c109823","Type":"ContainerStarted","Data":"655e80f478c8b35965cdabc01218c33284eace872e9057fe8fc4225534631956"} Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.475443 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-bc42-account-create-ppnl9" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.481278 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-bc42-account-create-ppnl9" event={"ID":"cbf19371-546d-4971-a555-443c36b129be","Type":"ContainerDied","Data":"28885e5ae0234685a8c0bded02ec874704c339140a102849743703a6f0ff5e44"} Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.481343 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28885e5ae0234685a8c0bded02ec874704c339140a102849743703a6f0ff5e44" Nov 25 09:09:50 crc kubenswrapper[4932]: I1125 09:09:50.640953 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="712f11c0-89ae-4e39-b6c6-3bb7303ccaae" path="/var/lib/kubelet/pods/712f11c0-89ae-4e39-b6c6-3bb7303ccaae/volumes" Nov 25 09:09:54 crc kubenswrapper[4932]: I1125 09:09:54.432070 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:54 crc kubenswrapper[4932]: I1125 09:09:54.446530 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.466257 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a937-account-create-d2hjq" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.477712 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-q55gm" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.508273 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7e0a-account-create-4tk2l" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.520860 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-5mlqd" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.536960 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7e0a-account-create-4tk2l" event={"ID":"afee3e89-74a3-4ad0-ac06-c5c97efa8543","Type":"ContainerDied","Data":"ab80d9c5c2501edec512c019e6c97c4c07ba8c121d562ca1fdc692673649723c"} Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.536993 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab80d9c5c2501edec512c019e6c97c4c07ba8c121d562ca1fdc692673649723c" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.537051 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7e0a-account-create-4tk2l" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.539728 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-q55gm" event={"ID":"339c14cf-c65c-41e7-a983-d16b36bf01ea","Type":"ContainerDied","Data":"f735a31dd042cfe5cb1b07358434fc602ddaa96c7c78d6a6845c3305673e1d2e"} Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.539752 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f735a31dd042cfe5cb1b07358434fc602ddaa96c7c78d6a6845c3305673e1d2e" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.539892 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-q55gm" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.542271 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-a937-account-create-d2hjq" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.542741 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-a937-account-create-d2hjq" event={"ID":"fc642fd3-edd5-4598-9eeb-06bdb9748b1a","Type":"ContainerDied","Data":"840c4444509d964f704eb4169695c50b63d3050f96c65c8c60c1559a9d6368da"} Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.542769 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="840c4444509d964f704eb4169695c50b63d3050f96c65c8c60c1559a9d6368da" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.547744 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-5mlqd" event={"ID":"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1","Type":"ContainerDied","Data":"27191d8b21ec33c2378bccda1b230b32918d9fb20ee7152b95fbbe41d095e92e"} Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.547788 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27191d8b21ec33c2378bccda1b230b32918d9fb20ee7152b95fbbe41d095e92e" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.547858 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-5mlqd" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.598089 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4xmtx\" (UniqueName: \"kubernetes.io/projected/afee3e89-74a3-4ad0-ac06-c5c97efa8543-kube-api-access-4xmtx\") pod \"afee3e89-74a3-4ad0-ac06-c5c97efa8543\" (UID: \"afee3e89-74a3-4ad0-ac06-c5c97efa8543\") " Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.598176 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwx72\" (UniqueName: \"kubernetes.io/projected/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1-kube-api-access-rwx72\") pod \"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1\" (UID: \"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1\") " Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.598233 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc642fd3-edd5-4598-9eeb-06bdb9748b1a-operator-scripts\") pod \"fc642fd3-edd5-4598-9eeb-06bdb9748b1a\" (UID: \"fc642fd3-edd5-4598-9eeb-06bdb9748b1a\") " Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.598305 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/afee3e89-74a3-4ad0-ac06-c5c97efa8543-operator-scripts\") pod \"afee3e89-74a3-4ad0-ac06-c5c97efa8543\" (UID: \"afee3e89-74a3-4ad0-ac06-c5c97efa8543\") " Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.598334 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1-operator-scripts\") pod \"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1\" (UID: \"b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1\") " Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.598381 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66dzt\" (UniqueName: 
\"kubernetes.io/projected/fc642fd3-edd5-4598-9eeb-06bdb9748b1a-kube-api-access-66dzt\") pod \"fc642fd3-edd5-4598-9eeb-06bdb9748b1a\" (UID: \"fc642fd3-edd5-4598-9eeb-06bdb9748b1a\") " Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.598446 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/339c14cf-c65c-41e7-a983-d16b36bf01ea-operator-scripts\") pod \"339c14cf-c65c-41e7-a983-d16b36bf01ea\" (UID: \"339c14cf-c65c-41e7-a983-d16b36bf01ea\") " Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.598466 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndjhp\" (UniqueName: \"kubernetes.io/projected/339c14cf-c65c-41e7-a983-d16b36bf01ea-kube-api-access-ndjhp\") pod \"339c14cf-c65c-41e7-a983-d16b36bf01ea\" (UID: \"339c14cf-c65c-41e7-a983-d16b36bf01ea\") " Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.600857 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afee3e89-74a3-4ad0-ac06-c5c97efa8543-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "afee3e89-74a3-4ad0-ac06-c5c97efa8543" (UID: "afee3e89-74a3-4ad0-ac06-c5c97efa8543"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.602353 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/339c14cf-c65c-41e7-a983-d16b36bf01ea-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "339c14cf-c65c-41e7-a983-d16b36bf01ea" (UID: "339c14cf-c65c-41e7-a983-d16b36bf01ea"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.604174 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc642fd3-edd5-4598-9eeb-06bdb9748b1a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fc642fd3-edd5-4598-9eeb-06bdb9748b1a" (UID: "fc642fd3-edd5-4598-9eeb-06bdb9748b1a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.604522 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afee3e89-74a3-4ad0-ac06-c5c97efa8543-kube-api-access-4xmtx" (OuterVolumeSpecName: "kube-api-access-4xmtx") pod "afee3e89-74a3-4ad0-ac06-c5c97efa8543" (UID: "afee3e89-74a3-4ad0-ac06-c5c97efa8543"). InnerVolumeSpecName "kube-api-access-4xmtx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.605128 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/339c14cf-c65c-41e7-a983-d16b36bf01ea-kube-api-access-ndjhp" (OuterVolumeSpecName: "kube-api-access-ndjhp") pod "339c14cf-c65c-41e7-a983-d16b36bf01ea" (UID: "339c14cf-c65c-41e7-a983-d16b36bf01ea"). InnerVolumeSpecName "kube-api-access-ndjhp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.606470 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1-kube-api-access-rwx72" (OuterVolumeSpecName: "kube-api-access-rwx72") pod "b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1" (UID: "b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1"). 
InnerVolumeSpecName "kube-api-access-rwx72". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.622279 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc642fd3-edd5-4598-9eeb-06bdb9748b1a-kube-api-access-66dzt" (OuterVolumeSpecName: "kube-api-access-66dzt") pod "fc642fd3-edd5-4598-9eeb-06bdb9748b1a" (UID: "fc642fd3-edd5-4598-9eeb-06bdb9748b1a"). InnerVolumeSpecName "kube-api-access-66dzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.622411 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1" (UID: "b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.700513 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc642fd3-edd5-4598-9eeb-06bdb9748b1a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.700549 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/afee3e89-74a3-4ad0-ac06-c5c97efa8543-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.700562 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.700574 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66dzt\" (UniqueName: \"kubernetes.io/projected/fc642fd3-edd5-4598-9eeb-06bdb9748b1a-kube-api-access-66dzt\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.700587 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/339c14cf-c65c-41e7-a983-d16b36bf01ea-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.700598 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndjhp\" (UniqueName: \"kubernetes.io/projected/339c14cf-c65c-41e7-a983-d16b36bf01ea-kube-api-access-ndjhp\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.700610 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4xmtx\" (UniqueName: \"kubernetes.io/projected/afee3e89-74a3-4ad0-ac06-c5c97efa8543-kube-api-access-4xmtx\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:55 crc kubenswrapper[4932]: I1125 09:09:55.700621 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwx72\" (UniqueName: \"kubernetes.io/projected/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1-kube-api-access-rwx72\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:56 crc kubenswrapper[4932]: I1125 09:09:56.565694 4932 generic.go:334] "Generic (PLEG): container finished" podID="e14c1b6a-a83b-47fc-8fac-36468c1b4df5" containerID="044d1296bb65dfef08ae69e4e66aaaf33fef1c2827f6ab1bc28df180a80213c7" exitCode=0 Nov 25 09:09:56 crc kubenswrapper[4932]: I1125 09:09:56.565994 4932 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lc5vk" event={"ID":"e14c1b6a-a83b-47fc-8fac-36468c1b4df5","Type":"ContainerDied","Data":"044d1296bb65dfef08ae69e4e66aaaf33fef1c2827f6ab1bc28df180a80213c7"} Nov 25 09:09:56 crc kubenswrapper[4932]: I1125 09:09:56.568953 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7301e0af-07da-4591-b6c7-62b14c109823","Type":"ContainerStarted","Data":"4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e"} Nov 25 09:09:56 crc kubenswrapper[4932]: I1125 09:09:56.568996 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7301e0af-07da-4591-b6c7-62b14c109823","Type":"ContainerStarted","Data":"941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64"} Nov 25 09:09:57 crc kubenswrapper[4932]: I1125 09:09:57.578587 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7301e0af-07da-4591-b6c7-62b14c109823","Type":"ContainerStarted","Data":"d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99"} Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.001385 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.044231 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-db-sync-config-data\") pod \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.044303 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tljhf\" (UniqueName: \"kubernetes.io/projected/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-kube-api-access-tljhf\") pod \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.044351 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-config-data\") pod \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.044402 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-scripts\") pod \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.044434 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-combined-ca-bundle\") pod \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.044532 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-etc-machine-id\") pod \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\" (UID: \"e14c1b6a-a83b-47fc-8fac-36468c1b4df5\") " Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.044797 4932 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "e14c1b6a-a83b-47fc-8fac-36468c1b4df5" (UID: "e14c1b6a-a83b-47fc-8fac-36468c1b4df5"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.045007 4932 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.051912 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e14c1b6a-a83b-47fc-8fac-36468c1b4df5" (UID: "e14c1b6a-a83b-47fc-8fac-36468c1b4df5"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.060032 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-scripts" (OuterVolumeSpecName: "scripts") pod "e14c1b6a-a83b-47fc-8fac-36468c1b4df5" (UID: "e14c1b6a-a83b-47fc-8fac-36468c1b4df5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.061526 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-kube-api-access-tljhf" (OuterVolumeSpecName: "kube-api-access-tljhf") pod "e14c1b6a-a83b-47fc-8fac-36468c1b4df5" (UID: "e14c1b6a-a83b-47fc-8fac-36468c1b4df5"). InnerVolumeSpecName "kube-api-access-tljhf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.093883 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e14c1b6a-a83b-47fc-8fac-36468c1b4df5" (UID: "e14c1b6a-a83b-47fc-8fac-36468c1b4df5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.125456 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-config-data" (OuterVolumeSpecName: "config-data") pod "e14c1b6a-a83b-47fc-8fac-36468c1b4df5" (UID: "e14c1b6a-a83b-47fc-8fac-36468c1b4df5"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.146851 4932 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.146893 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tljhf\" (UniqueName: \"kubernetes.io/projected/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-kube-api-access-tljhf\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.146907 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.146918 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.146930 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e14c1b6a-a83b-47fc-8fac-36468c1b4df5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.594695 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7301e0af-07da-4591-b6c7-62b14c109823","Type":"ContainerStarted","Data":"298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20"} Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.594761 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.598844 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-lc5vk" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.598753 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-lc5vk" event={"ID":"e14c1b6a-a83b-47fc-8fac-36468c1b4df5","Type":"ContainerDied","Data":"f67112bfa94da491ace2c4570bb536dd81d65d8e78cfaf5e5b46f24359dba0ce"} Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.610794 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f67112bfa94da491ace2c4570bb536dd81d65d8e78cfaf5e5b46f24359dba0ce" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.626395 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.422158878 podStartE2EDuration="10.626373346s" podCreationTimestamp="2025-11-25 09:09:48 +0000 UTC" firstStartedPulling="2025-11-25 09:09:49.649124493 +0000 UTC m=+1249.775154056" lastFinishedPulling="2025-11-25 09:09:57.853338961 +0000 UTC m=+1257.979368524" observedRunningTime="2025-11-25 09:09:58.624421388 +0000 UTC m=+1258.750450961" watchObservedRunningTime="2025-11-25 09:09:58.626373346 +0000 UTC m=+1258.752402919" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.844248 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:09:58 crc kubenswrapper[4932]: E1125 09:09:58.844728 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1" containerName="mariadb-database-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.844752 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1" containerName="mariadb-database-create" Nov 25 09:09:58 crc kubenswrapper[4932]: E1125 09:09:58.844777 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc642fd3-edd5-4598-9eeb-06bdb9748b1a" containerName="mariadb-account-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.844786 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc642fd3-edd5-4598-9eeb-06bdb9748b1a" containerName="mariadb-account-create" Nov 25 09:09:58 crc kubenswrapper[4932]: E1125 09:09:58.844805 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046" containerName="mariadb-database-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.844815 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046" containerName="mariadb-database-create" Nov 25 09:09:58 crc kubenswrapper[4932]: E1125 09:09:58.844838 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e14c1b6a-a83b-47fc-8fac-36468c1b4df5" containerName="cinder-db-sync" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.844847 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e14c1b6a-a83b-47fc-8fac-36468c1b4df5" containerName="cinder-db-sync" Nov 25 09:09:58 crc kubenswrapper[4932]: E1125 09:09:58.844863 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbf19371-546d-4971-a555-443c36b129be" containerName="mariadb-account-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.844872 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbf19371-546d-4971-a555-443c36b129be" containerName="mariadb-account-create" Nov 25 09:09:58 crc kubenswrapper[4932]: E1125 09:09:58.844889 4932 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="339c14cf-c65c-41e7-a983-d16b36bf01ea" containerName="mariadb-database-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.844897 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="339c14cf-c65c-41e7-a983-d16b36bf01ea" containerName="mariadb-database-create" Nov 25 09:09:58 crc kubenswrapper[4932]: E1125 09:09:58.844915 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afee3e89-74a3-4ad0-ac06-c5c97efa8543" containerName="mariadb-account-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.844922 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="afee3e89-74a3-4ad0-ac06-c5c97efa8543" containerName="mariadb-account-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.845144 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="afee3e89-74a3-4ad0-ac06-c5c97efa8543" containerName="mariadb-account-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.845160 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc642fd3-edd5-4598-9eeb-06bdb9748b1a" containerName="mariadb-account-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.845172 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="339c14cf-c65c-41e7-a983-d16b36bf01ea" containerName="mariadb-database-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.845239 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1" containerName="mariadb-database-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.845258 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046" containerName="mariadb-database-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.845279 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbf19371-546d-4971-a555-443c36b129be" containerName="mariadb-account-create" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.845291 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e14c1b6a-a83b-47fc-8fac-36468c1b4df5" containerName="cinder-db-sync" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.846561 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.855877 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.861705 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.861881 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.862035 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-v9kjf" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.862091 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.929498 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b648c594c-2gc6v"] Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.930996 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.944013 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b648c594c-2gc6v"] Nov 25 09:09:58 crc kubenswrapper[4932]: E1125 09:09:58.944109 4932 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode14c1b6a_a83b_47fc_8fac_36468c1b4df5.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode14c1b6a_a83b_47fc_8fac_36468c1b4df5.slice/crio-f67112bfa94da491ace2c4570bb536dd81d65d8e78cfaf5e5b46f24359dba0ce\": RecentStats: unable to find data in memory cache]" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.969362 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-ovsdbserver-sb\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.969426 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.969447 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-ovsdbserver-nb\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.969474 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.969514 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-config-data\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.969555 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e56b5819-c298-4533-9a40-7620e7af75c1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.969750 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-scripts\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 
09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.969787 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-config\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.969933 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xl44w\" (UniqueName: \"kubernetes.io/projected/e56b5819-c298-4533-9a40-7620e7af75c1-kube-api-access-xl44w\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.970013 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlnxg\" (UniqueName: \"kubernetes.io/projected/7d4c4389-5cd7-480d-aa80-22868cc356d4-kube-api-access-vlnxg\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.970065 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-dns-svc\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:58 crc kubenswrapper[4932]: I1125 09:09:58.970137 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-dns-swift-storage-0\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.054669 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.057412 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.059568 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.072224 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.072295 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-config-data\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.072346 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e56b5819-c298-4533-9a40-7620e7af75c1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.072391 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-scripts\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.072426 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-config\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.072450 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xl44w\" (UniqueName: \"kubernetes.io/projected/e56b5819-c298-4533-9a40-7620e7af75c1-kube-api-access-xl44w\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.072497 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlnxg\" (UniqueName: \"kubernetes.io/projected/7d4c4389-5cd7-480d-aa80-22868cc356d4-kube-api-access-vlnxg\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.072525 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-dns-svc\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.072571 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-dns-swift-storage-0\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: 
\"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.072610 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-ovsdbserver-sb\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.072639 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.072661 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-ovsdbserver-nb\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.074441 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-ovsdbserver-nb\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.075417 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-dns-svc\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.076058 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-dns-swift-storage-0\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.076767 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-ovsdbserver-sb\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.077503 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e56b5819-c298-4533-9a40-7620e7af75c1-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.077742 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-config\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.082822 4932 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.097533 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.102957 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlnxg\" (UniqueName: \"kubernetes.io/projected/7d4c4389-5cd7-480d-aa80-22868cc356d4-kube-api-access-vlnxg\") pod \"dnsmasq-dns-5b648c594c-2gc6v\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.107556 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xl44w\" (UniqueName: \"kubernetes.io/projected/e56b5819-c298-4533-9a40-7620e7af75c1-kube-api-access-xl44w\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.108054 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-config-data\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.110403 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.125484 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-scripts\") pod \"cinder-scheduler-0\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.176407 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.176756 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-scripts\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.176816 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlx26\" (UniqueName: \"kubernetes.io/projected/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-kube-api-access-hlx26\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.176859 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-config-data-custom\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.176972 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-logs\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.176998 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-etc-machine-id\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.177020 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-config-data\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.227436 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.276964 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.278242 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-logs\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.278287 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-etc-machine-id\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.278306 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-config-data\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.278378 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.278397 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-scripts\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.278433 4932 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-hlx26\" (UniqueName: \"kubernetes.io/projected/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-kube-api-access-hlx26\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.278459 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-config-data-custom\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.282081 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-logs\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.282143 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-etc-machine-id\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.286322 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-config-data-custom\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.287514 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-config-data\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.293070 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.295646 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-scripts\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.303212 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlx26\" (UniqueName: \"kubernetes.io/projected/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-kube-api-access-hlx26\") pod \"cinder-api-0\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.379664 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.629028 4932 generic.go:334] "Generic (PLEG): container finished" podID="e12148ff-1b2e-4c34-85c0-ca43747a2eb4" containerID="04f68200aa484dc2dc939e490dcf4ff88ff89773a7c654ea9b8bce2bac1b8aaf" exitCode=0 Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.630847 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-k6hqv" event={"ID":"e12148ff-1b2e-4c34-85c0-ca43747a2eb4","Type":"ContainerDied","Data":"04f68200aa484dc2dc939e490dcf4ff88ff89773a7c654ea9b8bce2bac1b8aaf"} Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.758003 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.900945 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6ktnm"] Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.902001 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.904731 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.904950 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-tp97l" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.913702 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.919805 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6ktnm"] Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.995098 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6ktnm\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.995158 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-scripts\") pod \"nova-cell0-conductor-db-sync-6ktnm\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.995270 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-config-data\") pod \"nova-cell0-conductor-db-sync-6ktnm\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:09:59 crc kubenswrapper[4932]: I1125 09:09:59.995304 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fg222\" (UniqueName: \"kubernetes.io/projected/b9fc814a-d54a-4157-9257-db33b7734522-kube-api-access-fg222\") pod \"nova-cell0-conductor-db-sync-6ktnm\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.007006 
4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.049127 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b648c594c-2gc6v"] Nov 25 09:10:00 crc kubenswrapper[4932]: W1125 09:10:00.055515 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7d4c4389_5cd7_480d_aa80_22868cc356d4.slice/crio-70b28957d5de529f75231922c9e482e257a4c5e62941e52f1d4f00ac44dd31cc WatchSource:0}: Error finding container 70b28957d5de529f75231922c9e482e257a4c5e62941e52f1d4f00ac44dd31cc: Status 404 returned error can't find the container with id 70b28957d5de529f75231922c9e482e257a4c5e62941e52f1d4f00ac44dd31cc Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.098275 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6ktnm\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.098319 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-scripts\") pod \"nova-cell0-conductor-db-sync-6ktnm\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.098362 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-config-data\") pod \"nova-cell0-conductor-db-sync-6ktnm\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.098383 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fg222\" (UniqueName: \"kubernetes.io/projected/b9fc814a-d54a-4157-9257-db33b7734522-kube-api-access-fg222\") pod \"nova-cell0-conductor-db-sync-6ktnm\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.107799 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-scripts\") pod \"nova-cell0-conductor-db-sync-6ktnm\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.111774 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-config-data\") pod \"nova-cell0-conductor-db-sync-6ktnm\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.117321 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-6ktnm\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 
09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.135002 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fg222\" (UniqueName: \"kubernetes.io/projected/b9fc814a-d54a-4157-9257-db33b7734522-kube-api-access-fg222\") pod \"nova-cell0-conductor-db-sync-6ktnm\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.233621 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.712584 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"3637f5df-c0aa-4cf4-9885-4a0e63886fb6","Type":"ContainerStarted","Data":"be3d744105a5bc3d322d1ce2ae81aa5f091bfa7074c8aea77fd206de4c3e6533"} Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.725501 4932 generic.go:334] "Generic (PLEG): container finished" podID="7d4c4389-5cd7-480d-aa80-22868cc356d4" containerID="a86711869eb881f92dfce4f55407fa60c093aebc20bd9b457c2f7185e717abc6" exitCode=0 Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.725599 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" event={"ID":"7d4c4389-5cd7-480d-aa80-22868cc356d4","Type":"ContainerDied","Data":"a86711869eb881f92dfce4f55407fa60c093aebc20bd9b457c2f7185e717abc6"} Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.725628 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" event={"ID":"7d4c4389-5cd7-480d-aa80-22868cc356d4","Type":"ContainerStarted","Data":"70b28957d5de529f75231922c9e482e257a4c5e62941e52f1d4f00ac44dd31cc"} Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.735666 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e56b5819-c298-4533-9a40-7620e7af75c1","Type":"ContainerStarted","Data":"21a25883869d7267ee4dd5386633b54ed802e007073c3a52c980dcb8741e692c"} Nov 25 09:10:00 crc kubenswrapper[4932]: I1125 09:10:00.775297 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6ktnm"] Nov 25 09:10:00 crc kubenswrapper[4932]: W1125 09:10:00.787360 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9fc814a_d54a_4157_9257_db33b7734522.slice/crio-2c1daf48829f22373c8ef3759696d4eb5596903a7ebdc3056be63c2cb907231c WatchSource:0}: Error finding container 2c1daf48829f22373c8ef3759696d4eb5596903a7ebdc3056be63c2cb907231c: Status 404 returned error can't find the container with id 2c1daf48829f22373c8ef3759696d4eb5596903a7ebdc3056be63c2cb907231c Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.116114 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.255172 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pgds5\" (UniqueName: \"kubernetes.io/projected/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-kube-api-access-pgds5\") pod \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\" (UID: \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\") " Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.255559 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-config\") pod \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\" (UID: \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\") " Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.255787 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-combined-ca-bundle\") pod \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\" (UID: \"e12148ff-1b2e-4c34-85c0-ca43747a2eb4\") " Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.267239 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-kube-api-access-pgds5" (OuterVolumeSpecName: "kube-api-access-pgds5") pod "e12148ff-1b2e-4c34-85c0-ca43747a2eb4" (UID: "e12148ff-1b2e-4c34-85c0-ca43747a2eb4"). InnerVolumeSpecName "kube-api-access-pgds5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.327501 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e12148ff-1b2e-4c34-85c0-ca43747a2eb4" (UID: "e12148ff-1b2e-4c34-85c0-ca43747a2eb4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.333719 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-config" (OuterVolumeSpecName: "config") pod "e12148ff-1b2e-4c34-85c0-ca43747a2eb4" (UID: "e12148ff-1b2e-4c34-85c0-ca43747a2eb4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.359989 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.360517 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pgds5\" (UniqueName: \"kubernetes.io/projected/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-kube-api-access-pgds5\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.360600 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/e12148ff-1b2e-4c34-85c0-ca43747a2eb4-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.609696 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.755841 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-k6hqv" event={"ID":"e12148ff-1b2e-4c34-85c0-ca43747a2eb4","Type":"ContainerDied","Data":"63cef85d075ed786b0d7093447d9525278c47ddd5f5b8887aa8b3d69eb0534ca"} Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.755880 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="63cef85d075ed786b0d7093447d9525278c47ddd5f5b8887aa8b3d69eb0534ca" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.755885 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-k6hqv" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.757487 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e56b5819-c298-4533-9a40-7620e7af75c1","Type":"ContainerStarted","Data":"14d98a765c0db3f2216e071be3e5d12d51db407dc963fd836335a4812505b4c1"} Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.760468 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"3637f5df-c0aa-4cf4-9885-4a0e63886fb6","Type":"ContainerStarted","Data":"93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1"} Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.766889 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" event={"ID":"7d4c4389-5cd7-480d-aa80-22868cc356d4","Type":"ContainerStarted","Data":"21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96"} Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.771415 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.789945 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6ktnm" event={"ID":"b9fc814a-d54a-4157-9257-db33b7734522","Type":"ContainerStarted","Data":"2c1daf48829f22373c8ef3759696d4eb5596903a7ebdc3056be63c2cb907231c"} Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.802066 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" podStartSLOduration=3.802044429 podStartE2EDuration="3.802044429s" podCreationTimestamp="2025-11-25 09:09:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-25 09:10:01.801776821 +0000 UTC m=+1261.927806384" watchObservedRunningTime="2025-11-25 09:10:01.802044429 +0000 UTC m=+1261.928073992" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.913574 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b648c594c-2gc6v"] Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.951667 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-fqnsw"] Nov 25 09:10:01 crc kubenswrapper[4932]: E1125 09:10:01.952382 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e12148ff-1b2e-4c34-85c0-ca43747a2eb4" containerName="neutron-db-sync" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.952399 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e12148ff-1b2e-4c34-85c0-ca43747a2eb4" containerName="neutron-db-sync" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.952722 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e12148ff-1b2e-4c34-85c0-ca43747a2eb4" containerName="neutron-db-sync" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.954243 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:01 crc kubenswrapper[4932]: I1125 09:10:01.978455 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-fqnsw"] Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.042747 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-77fc89ff58-gpqc5"] Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.045117 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.054813 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-qg8rj" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.055074 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.055396 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.055594 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.078063 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-77fc89ff58-gpqc5"] Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.079224 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-dns-swift-storage-0\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.079319 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-dns-svc\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.079390 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-ovsdbserver-nb\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.079425 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-config\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.079554 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-ovsdbserver-sb\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.079607 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44jmn\" (UniqueName: \"kubernetes.io/projected/3757d7e1-b11f-4e98-964a-611d24f165af-kube-api-access-44jmn\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.181624 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfjdv\" (UniqueName: \"kubernetes.io/projected/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-kube-api-access-pfjdv\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.181685 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-dns-svc\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.181714 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-ovndb-tls-certs\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.181742 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-combined-ca-bundle\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.181760 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-config\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.181784 4932 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-ovsdbserver-nb\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.181808 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-config\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.181822 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-ovsdbserver-sb\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.181845 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44jmn\" (UniqueName: \"kubernetes.io/projected/3757d7e1-b11f-4e98-964a-611d24f165af-kube-api-access-44jmn\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.181876 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-httpd-config\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.181935 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-dns-swift-storage-0\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.182873 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-dns-swift-storage-0\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.183012 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-ovsdbserver-sb\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.183218 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-dns-svc\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.183708 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" 
(UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-ovsdbserver-nb\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.184315 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-config\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.205477 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44jmn\" (UniqueName: \"kubernetes.io/projected/3757d7e1-b11f-4e98-964a-611d24f165af-kube-api-access-44jmn\") pod \"dnsmasq-dns-7965876c4f-fqnsw\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.283478 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-httpd-config\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.283622 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfjdv\" (UniqueName: \"kubernetes.io/projected/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-kube-api-access-pfjdv\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.283674 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-ovndb-tls-certs\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.283714 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-combined-ca-bundle\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.283745 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-config\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.293948 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.296236 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-config\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.297280 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-combined-ca-bundle\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.298061 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-ovndb-tls-certs\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.298132 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-httpd-config\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.307620 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfjdv\" (UniqueName: \"kubernetes.io/projected/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-kube-api-access-pfjdv\") pod \"neutron-77fc89ff58-gpqc5\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.374681 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.561036 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.561987 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="ceilometer-central-agent" containerID="cri-o://941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64" gracePeriod=30 Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.562844 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="proxy-httpd" containerID="cri-o://298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20" gracePeriod=30 Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.563357 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="ceilometer-notification-agent" containerID="cri-o://4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e" gracePeriod=30 Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.563380 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="sg-core" containerID="cri-o://d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99" gracePeriod=30 Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.830438 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"3637f5df-c0aa-4cf4-9885-4a0e63886fb6","Type":"ContainerStarted","Data":"4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2"} Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.830833 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="3637f5df-c0aa-4cf4-9885-4a0e63886fb6" containerName="cinder-api-log" containerID="cri-o://93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1" gracePeriod=30 Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.831075 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.831358 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="3637f5df-c0aa-4cf4-9885-4a0e63886fb6" containerName="cinder-api" containerID="cri-o://4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2" gracePeriod=30 Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.845004 4932 generic.go:334] "Generic (PLEG): container finished" podID="7301e0af-07da-4591-b6c7-62b14c109823" containerID="298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20" exitCode=0 Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.845312 4932 generic.go:334] "Generic (PLEG): container finished" podID="7301e0af-07da-4591-b6c7-62b14c109823" containerID="d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99" exitCode=2 Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.845521 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7301e0af-07da-4591-b6c7-62b14c109823","Type":"ContainerDied","Data":"298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20"} Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.845707 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7301e0af-07da-4591-b6c7-62b14c109823","Type":"ContainerDied","Data":"d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99"} Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.908812 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e56b5819-c298-4533-9a40-7620e7af75c1","Type":"ContainerStarted","Data":"047371326a65cbcd66896fe69decc959c902a3259f649af6714073deae1e8854"} Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.913101 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.91308112 podStartE2EDuration="3.91308112s" podCreationTimestamp="2025-11-25 09:09:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:10:02.861470286 +0000 UTC m=+1262.987499849" watchObservedRunningTime="2025-11-25 09:10:02.91308112 +0000 UTC m=+1263.039110683" Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.948501 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-fqnsw"] Nov 25 09:10:02 crc kubenswrapper[4932]: I1125 09:10:02.959397 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.019013354 podStartE2EDuration="4.959379668s" podCreationTimestamp="2025-11-25 09:09:58 +0000 UTC" firstStartedPulling="2025-11-25 09:09:59.772468245 +0000 UTC m=+1259.898497808" lastFinishedPulling="2025-11-25 09:10:00.712834559 +0000 UTC m=+1260.838864122" observedRunningTime="2025-11-25 09:10:02.93968098 +0000 UTC m=+1263.065710553" watchObservedRunningTime="2025-11-25 09:10:02.959379668 +0000 UTC m=+1263.085409231" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.187380 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-77fc89ff58-gpqc5"] Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.562237 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.626903 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8hzt\" (UniqueName: \"kubernetes.io/projected/7301e0af-07da-4591-b6c7-62b14c109823-kube-api-access-k8hzt\") pod \"7301e0af-07da-4591-b6c7-62b14c109823\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.626952 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-sg-core-conf-yaml\") pod \"7301e0af-07da-4591-b6c7-62b14c109823\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.626978 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-combined-ca-bundle\") pod \"7301e0af-07da-4591-b6c7-62b14c109823\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.627003 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7301e0af-07da-4591-b6c7-62b14c109823-run-httpd\") pod \"7301e0af-07da-4591-b6c7-62b14c109823\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.627088 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-config-data\") pod \"7301e0af-07da-4591-b6c7-62b14c109823\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.627182 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-scripts\") pod \"7301e0af-07da-4591-b6c7-62b14c109823\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.627300 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7301e0af-07da-4591-b6c7-62b14c109823-log-httpd\") pod \"7301e0af-07da-4591-b6c7-62b14c109823\" (UID: \"7301e0af-07da-4591-b6c7-62b14c109823\") " Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.628148 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7301e0af-07da-4591-b6c7-62b14c109823-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7301e0af-07da-4591-b6c7-62b14c109823" (UID: "7301e0af-07da-4591-b6c7-62b14c109823"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.628671 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7301e0af-07da-4591-b6c7-62b14c109823-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "7301e0af-07da-4591-b6c7-62b14c109823" (UID: "7301e0af-07da-4591-b6c7-62b14c109823"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.638880 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7301e0af-07da-4591-b6c7-62b14c109823-kube-api-access-k8hzt" (OuterVolumeSpecName: "kube-api-access-k8hzt") pod "7301e0af-07da-4591-b6c7-62b14c109823" (UID: "7301e0af-07da-4591-b6c7-62b14c109823"). InnerVolumeSpecName "kube-api-access-k8hzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.642381 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-scripts" (OuterVolumeSpecName: "scripts") pod "7301e0af-07da-4591-b6c7-62b14c109823" (UID: "7301e0af-07da-4591-b6c7-62b14c109823"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.683073 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7301e0af-07da-4591-b6c7-62b14c109823" (UID: "7301e0af-07da-4591-b6c7-62b14c109823"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.732290 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.732336 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7301e0af-07da-4591-b6c7-62b14c109823-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.732351 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8hzt\" (UniqueName: \"kubernetes.io/projected/7301e0af-07da-4591-b6c7-62b14c109823-kube-api-access-k8hzt\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.732365 4932 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.732376 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7301e0af-07da-4591-b6c7-62b14c109823-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.801312 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7301e0af-07da-4591-b6c7-62b14c109823" (UID: "7301e0af-07da-4591-b6c7-62b14c109823"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.813445 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-config-data" (OuterVolumeSpecName: "config-data") pod "7301e0af-07da-4591-b6c7-62b14c109823" (UID: "7301e0af-07da-4591-b6c7-62b14c109823"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.835538 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.835574 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7301e0af-07da-4591-b6c7-62b14c109823-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.887068 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.887431 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="230dcb38-b2f4-45fa-91a9-46c11fc57e3d" containerName="glance-log" containerID="cri-o://35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6" gracePeriod=30 Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.887590 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="230dcb38-b2f4-45fa-91a9-46c11fc57e3d" containerName="glance-httpd" containerID="cri-o://8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac" gracePeriod=30 Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.923519 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-77fc89ff58-gpqc5" event={"ID":"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac","Type":"ContainerStarted","Data":"95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0"} Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.923568 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-77fc89ff58-gpqc5" event={"ID":"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac","Type":"ContainerStarted","Data":"a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9"} Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.923582 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-77fc89ff58-gpqc5" event={"ID":"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac","Type":"ContainerStarted","Data":"200193e5c2f03bd1ec6656fa01d90e9754a37eb5109b7401767dfb9355b7089b"} Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.924833 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.927101 4932 generic.go:334] "Generic (PLEG): container finished" podID="3757d7e1-b11f-4e98-964a-611d24f165af" containerID="294edcdf59f6d0ce14f428296d0ac17e2f9df4e6d4cfbbe61523a9bd88effc0d" exitCode=0 Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.927165 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" event={"ID":"3757d7e1-b11f-4e98-964a-611d24f165af","Type":"ContainerDied","Data":"294edcdf59f6d0ce14f428296d0ac17e2f9df4e6d4cfbbe61523a9bd88effc0d"} Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.927205 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" event={"ID":"3757d7e1-b11f-4e98-964a-611d24f165af","Type":"ContainerStarted","Data":"13f8d83b92cb9ac1655c7b7a9d850cb3f9fa765fad7ff81e8b0dfa549d849ffa"} Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.963468 4932 generic.go:334] "Generic 
(PLEG): container finished" podID="7301e0af-07da-4591-b6c7-62b14c109823" containerID="4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e" exitCode=0 Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.963512 4932 generic.go:334] "Generic (PLEG): container finished" podID="7301e0af-07da-4591-b6c7-62b14c109823" containerID="941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64" exitCode=0 Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.963585 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7301e0af-07da-4591-b6c7-62b14c109823","Type":"ContainerDied","Data":"4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e"} Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.963617 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7301e0af-07da-4591-b6c7-62b14c109823","Type":"ContainerDied","Data":"941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64"} Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.963632 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7301e0af-07da-4591-b6c7-62b14c109823","Type":"ContainerDied","Data":"655e80f478c8b35965cdabc01218c33284eace872e9057fe8fc4225534631956"} Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.963653 4932 scope.go:117] "RemoveContainer" containerID="298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.963791 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:03 crc kubenswrapper[4932]: I1125 09:10:03.967848 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-77fc89ff58-gpqc5" podStartSLOduration=2.967824559 podStartE2EDuration="2.967824559s" podCreationTimestamp="2025-11-25 09:10:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:10:03.953394496 +0000 UTC m=+1264.079424069" watchObservedRunningTime="2025-11-25 09:10:03.967824559 +0000 UTC m=+1264.093854122" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.020638 4932 generic.go:334] "Generic (PLEG): container finished" podID="3637f5df-c0aa-4cf4-9885-4a0e63886fb6" containerID="93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1" exitCode=143 Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.021657 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"3637f5df-c0aa-4cf4-9885-4a0e63886fb6","Type":"ContainerDied","Data":"93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1"} Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.021873 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" podUID="7d4c4389-5cd7-480d-aa80-22868cc356d4" containerName="dnsmasq-dns" containerID="cri-o://21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96" gracePeriod=10 Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.229291 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.254002 4932 scope.go:117] "RemoveContainer" containerID="d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.265147 
4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.282799 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.319052 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:04 crc kubenswrapper[4932]: E1125 09:10:04.319479 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="proxy-httpd" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.319496 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="proxy-httpd" Nov 25 09:10:04 crc kubenswrapper[4932]: E1125 09:10:04.319520 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="sg-core" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.319527 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="sg-core" Nov 25 09:10:04 crc kubenswrapper[4932]: E1125 09:10:04.319546 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="ceilometer-central-agent" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.319553 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="ceilometer-central-agent" Nov 25 09:10:04 crc kubenswrapper[4932]: E1125 09:10:04.319565 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="ceilometer-notification-agent" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.319571 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="ceilometer-notification-agent" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.319726 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="proxy-httpd" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.319740 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="sg-core" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.319756 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="ceilometer-notification-agent" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.319767 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="7301e0af-07da-4591-b6c7-62b14c109823" containerName="ceilometer-central-agent" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.321435 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.324006 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.324223 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.331550 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.353668 4932 scope.go:117] "RemoveContainer" containerID="4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.407844 4932 scope.go:117] "RemoveContainer" containerID="941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.457892 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-config-data\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.457942 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-run-httpd\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.457971 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-log-httpd\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.458006 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.458041 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdz72\" (UniqueName: \"kubernetes.io/projected/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-kube-api-access-bdz72\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.458116 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.458158 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-scripts\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 
09:10:04.477720 4932 scope.go:117] "RemoveContainer" containerID="298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20" Nov 25 09:10:04 crc kubenswrapper[4932]: E1125 09:10:04.481132 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20\": container with ID starting with 298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20 not found: ID does not exist" containerID="298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.481213 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20"} err="failed to get container status \"298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20\": rpc error: code = NotFound desc = could not find container \"298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20\": container with ID starting with 298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20 not found: ID does not exist" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.481243 4932 scope.go:117] "RemoveContainer" containerID="d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99" Nov 25 09:10:04 crc kubenswrapper[4932]: E1125 09:10:04.492769 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99\": container with ID starting with d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99 not found: ID does not exist" containerID="d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.492812 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99"} err="failed to get container status \"d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99\": rpc error: code = NotFound desc = could not find container \"d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99\": container with ID starting with d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99 not found: ID does not exist" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.492838 4932 scope.go:117] "RemoveContainer" containerID="4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e" Nov 25 09:10:04 crc kubenswrapper[4932]: E1125 09:10:04.494182 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e\": container with ID starting with 4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e not found: ID does not exist" containerID="4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.494263 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e"} err="failed to get container status \"4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e\": rpc error: code = NotFound desc = could not find container \"4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e\": container with ID 
starting with 4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e not found: ID does not exist" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.494296 4932 scope.go:117] "RemoveContainer" containerID="941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64" Nov 25 09:10:04 crc kubenswrapper[4932]: E1125 09:10:04.494787 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64\": container with ID starting with 941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64 not found: ID does not exist" containerID="941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.494809 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64"} err="failed to get container status \"941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64\": rpc error: code = NotFound desc = could not find container \"941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64\": container with ID starting with 941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64 not found: ID does not exist" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.494824 4932 scope.go:117] "RemoveContainer" containerID="298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.495378 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20"} err="failed to get container status \"298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20\": rpc error: code = NotFound desc = could not find container \"298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20\": container with ID starting with 298f4360def524e1f88afbed0062cf645d04a4b36cbb2c3577d38e644d981e20 not found: ID does not exist" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.495394 4932 scope.go:117] "RemoveContainer" containerID="d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.495581 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99"} err="failed to get container status \"d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99\": rpc error: code = NotFound desc = could not find container \"d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99\": container with ID starting with d221d323be197440f057cd9e69488fefb78c4ed1a06feb44dd1fa87342069f99 not found: ID does not exist" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.495597 4932 scope.go:117] "RemoveContainer" containerID="4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.495789 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e"} err="failed to get container status \"4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e\": rpc error: code = NotFound desc = could not find container \"4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e\": container with ID 
starting with 4ff64f379d5ad26121c3089ea16f8a36973fb27f3b008bb76c3787abd1720b8e not found: ID does not exist" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.495803 4932 scope.go:117] "RemoveContainer" containerID="941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.496006 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64"} err="failed to get container status \"941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64\": rpc error: code = NotFound desc = could not find container \"941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64\": container with ID starting with 941656c026642623ce63e206b2b0a640da27132c0392af1382812c31c71d0f64 not found: ID does not exist" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.550032 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.559789 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.559840 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-scripts\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.559934 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-config-data\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.559952 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-run-httpd\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.559971 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-log-httpd\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.559997 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.560046 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdz72\" (UniqueName: \"kubernetes.io/projected/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-kube-api-access-bdz72\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 
09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.561076 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-log-httpd\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.561370 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-run-httpd\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.577980 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.578491 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-scripts\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.578672 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.584838 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-config-data\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.585103 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdz72\" (UniqueName: \"kubernetes.io/projected/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-kube-api-access-bdz72\") pod \"ceilometer-0\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.627632 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7301e0af-07da-4591-b6c7-62b14c109823" path="/var/lib/kubelet/pods/7301e0af-07da-4591-b6c7-62b14c109823/volumes" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.661731 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-ovsdbserver-nb\") pod \"7d4c4389-5cd7-480d-aa80-22868cc356d4\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.661923 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-config\") pod \"7d4c4389-5cd7-480d-aa80-22868cc356d4\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.661966 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlnxg\" (UniqueName: 
\"kubernetes.io/projected/7d4c4389-5cd7-480d-aa80-22868cc356d4-kube-api-access-vlnxg\") pod \"7d4c4389-5cd7-480d-aa80-22868cc356d4\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.662062 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-dns-svc\") pod \"7d4c4389-5cd7-480d-aa80-22868cc356d4\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.662084 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-dns-swift-storage-0\") pod \"7d4c4389-5cd7-480d-aa80-22868cc356d4\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.662892 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-ovsdbserver-sb\") pod \"7d4c4389-5cd7-480d-aa80-22868cc356d4\" (UID: \"7d4c4389-5cd7-480d-aa80-22868cc356d4\") " Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.665304 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d4c4389-5cd7-480d-aa80-22868cc356d4-kube-api-access-vlnxg" (OuterVolumeSpecName: "kube-api-access-vlnxg") pod "7d4c4389-5cd7-480d-aa80-22868cc356d4" (UID: "7d4c4389-5cd7-480d-aa80-22868cc356d4"). InnerVolumeSpecName "kube-api-access-vlnxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.670004 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.736352 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7d4c4389-5cd7-480d-aa80-22868cc356d4" (UID: "7d4c4389-5cd7-480d-aa80-22868cc356d4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.742705 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-config" (OuterVolumeSpecName: "config") pod "7d4c4389-5cd7-480d-aa80-22868cc356d4" (UID: "7d4c4389-5cd7-480d-aa80-22868cc356d4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.743597 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7d4c4389-5cd7-480d-aa80-22868cc356d4" (UID: "7d4c4389-5cd7-480d-aa80-22868cc356d4"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.744904 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7d4c4389-5cd7-480d-aa80-22868cc356d4" (UID: "7d4c4389-5cd7-480d-aa80-22868cc356d4"). 
InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.756971 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7d4c4389-5cd7-480d-aa80-22868cc356d4" (UID: "7d4c4389-5cd7-480d-aa80-22868cc356d4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.765574 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.765608 4932 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.765621 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.765634 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.765645 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d4c4389-5cd7-480d-aa80-22868cc356d4-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:04 crc kubenswrapper[4932]: I1125 09:10:04.765657 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlnxg\" (UniqueName: \"kubernetes.io/projected/7d4c4389-5cd7-480d-aa80-22868cc356d4-kube-api-access-vlnxg\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.057459 4932 generic.go:334] "Generic (PLEG): container finished" podID="7d4c4389-5cd7-480d-aa80-22868cc356d4" containerID="21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96" exitCode=0 Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.058244 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" event={"ID":"7d4c4389-5cd7-480d-aa80-22868cc356d4","Type":"ContainerDied","Data":"21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96"} Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.058279 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" event={"ID":"7d4c4389-5cd7-480d-aa80-22868cc356d4","Type":"ContainerDied","Data":"70b28957d5de529f75231922c9e482e257a4c5e62941e52f1d4f00ac44dd31cc"} Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.058299 4932 scope.go:117] "RemoveContainer" containerID="21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.058377 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b648c594c-2gc6v" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.071289 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" event={"ID":"3757d7e1-b11f-4e98-964a-611d24f165af","Type":"ContainerStarted","Data":"900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb"} Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.071362 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.080689 4932 generic.go:334] "Generic (PLEG): container finished" podID="230dcb38-b2f4-45fa-91a9-46c11fc57e3d" containerID="35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6" exitCode=143 Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.081713 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"230dcb38-b2f4-45fa-91a9-46c11fc57e3d","Type":"ContainerDied","Data":"35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6"} Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.101379 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" podStartSLOduration=4.101356439 podStartE2EDuration="4.101356439s" podCreationTimestamp="2025-11-25 09:10:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:10:05.088360158 +0000 UTC m=+1265.214389741" watchObservedRunningTime="2025-11-25 09:10:05.101356439 +0000 UTC m=+1265.227386002" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.104342 4932 scope.go:117] "RemoveContainer" containerID="a86711869eb881f92dfce4f55407fa60c093aebc20bd9b457c2f7185e717abc6" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.112255 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b648c594c-2gc6v"] Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.120681 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b648c594c-2gc6v"] Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.131959 4932 scope.go:117] "RemoveContainer" containerID="21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96" Nov 25 09:10:05 crc kubenswrapper[4932]: E1125 09:10:05.135540 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96\": container with ID starting with 21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96 not found: ID does not exist" containerID="21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.135595 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96"} err="failed to get container status \"21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96\": rpc error: code = NotFound desc = could not find container \"21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96\": container with ID starting with 21ba8627a3c9d4999b6ddbd6a9bc0d9b4493513ede8e2746144c654e4c9dfb96 not found: ID does not exist" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.135662 4932 scope.go:117] "RemoveContainer" 
containerID="a86711869eb881f92dfce4f55407fa60c093aebc20bd9b457c2f7185e717abc6" Nov 25 09:10:05 crc kubenswrapper[4932]: E1125 09:10:05.138536 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a86711869eb881f92dfce4f55407fa60c093aebc20bd9b457c2f7185e717abc6\": container with ID starting with a86711869eb881f92dfce4f55407fa60c093aebc20bd9b457c2f7185e717abc6 not found: ID does not exist" containerID="a86711869eb881f92dfce4f55407fa60c093aebc20bd9b457c2f7185e717abc6" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.138558 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a86711869eb881f92dfce4f55407fa60c093aebc20bd9b457c2f7185e717abc6"} err="failed to get container status \"a86711869eb881f92dfce4f55407fa60c093aebc20bd9b457c2f7185e717abc6\": rpc error: code = NotFound desc = could not find container \"a86711869eb881f92dfce4f55407fa60c093aebc20bd9b457c2f7185e717abc6\": container with ID starting with a86711869eb881f92dfce4f55407fa60c093aebc20bd9b457c2f7185e717abc6 not found: ID does not exist" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.214612 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.440611 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.440850 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3b55fab6-584e-4098-beb0-be91c10e631f" containerName="glance-log" containerID="cri-o://9819269c37912ac65e199eb5854e29f21125b062fca1aae014ea6ef312421df9" gracePeriod=30 Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.441010 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="3b55fab6-584e-4098-beb0-be91c10e631f" containerName="glance-httpd" containerID="cri-o://b2c7e77789303154985b5d572e2e645dc59426801021e02f4bfe4ed70e08243e" gracePeriod=30 Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.930451 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5584db9bdf-rzbj9"] Nov 25 09:10:05 crc kubenswrapper[4932]: E1125 09:10:05.931497 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d4c4389-5cd7-480d-aa80-22868cc356d4" containerName="dnsmasq-dns" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.931604 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d4c4389-5cd7-480d-aa80-22868cc356d4" containerName="dnsmasq-dns" Nov 25 09:10:05 crc kubenswrapper[4932]: E1125 09:10:05.931729 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d4c4389-5cd7-480d-aa80-22868cc356d4" containerName="init" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.931828 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d4c4389-5cd7-480d-aa80-22868cc356d4" containerName="init" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.932134 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d4c4389-5cd7-480d-aa80-22868cc356d4" containerName="dnsmasq-dns" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.933437 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.936953 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.937423 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.943359 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5584db9bdf-rzbj9"] Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.988411 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-ovndb-tls-certs\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.988512 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-public-tls-certs\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.988538 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-combined-ca-bundle\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.988606 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-httpd-config\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.988622 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-internal-tls-certs\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.988639 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-config\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:05 crc kubenswrapper[4932]: I1125 09:10:05.988662 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhnks\" (UniqueName: \"kubernetes.io/projected/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-kube-api-access-xhnks\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.090717 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-public-tls-certs\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.092296 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-combined-ca-bundle\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.093169 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-httpd-config\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.093295 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-internal-tls-certs\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.093411 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-config\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.093523 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhnks\" (UniqueName: \"kubernetes.io/projected/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-kube-api-access-xhnks\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.093998 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-ovndb-tls-certs\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.098033 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-httpd-config\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.098221 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-combined-ca-bundle\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.098974 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-public-tls-certs\") pod \"neutron-5584db9bdf-rzbj9\" (UID: 
\"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.101378 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-config\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.101879 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-internal-tls-certs\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.103659 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-ovndb-tls-certs\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.104392 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4e0e2802-c55a-4f62-8279-c2d11b3d54ee","Type":"ContainerStarted","Data":"90198ee11c4363be4c2f7cf8d0f6522c825b02e7e3d2a9ef004e5d3d8c4035c0"} Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.104462 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4e0e2802-c55a-4f62-8279-c2d11b3d54ee","Type":"ContainerStarted","Data":"6f04e942706791ae76b7cf1dc6e1b30fbd8437d9111f17878851deead2680337"} Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.109750 4932 generic.go:334] "Generic (PLEG): container finished" podID="3b55fab6-584e-4098-beb0-be91c10e631f" containerID="9819269c37912ac65e199eb5854e29f21125b062fca1aae014ea6ef312421df9" exitCode=143 Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.110642 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3b55fab6-584e-4098-beb0-be91c10e631f","Type":"ContainerDied","Data":"9819269c37912ac65e199eb5854e29f21125b062fca1aae014ea6ef312421df9"} Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.126297 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhnks\" (UniqueName: \"kubernetes.io/projected/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-kube-api-access-xhnks\") pod \"neutron-5584db9bdf-rzbj9\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.316891 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.360489 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.622060 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d4c4389-5cd7-480d-aa80-22868cc356d4" path="/var/lib/kubelet/pods/7d4c4389-5cd7-480d-aa80-22868cc356d4/volumes" Nov 25 09:10:06 crc kubenswrapper[4932]: I1125 09:10:06.923899 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5584db9bdf-rzbj9"] Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.123955 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5584db9bdf-rzbj9" event={"ID":"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5","Type":"ContainerStarted","Data":"3144248aa7e9b43532c2adb1175327527d7a9769c5d0faf83f42c181dafe5e0b"} Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.136377 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4e0e2802-c55a-4f62-8279-c2d11b3d54ee","Type":"ContainerStarted","Data":"d9563a86e082e3947142338b6402582bf2ccd34b7bcd9b8859db1fcbfc676364"} Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.566124 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.630823 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kh2xv\" (UniqueName: \"kubernetes.io/projected/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-kube-api-access-kh2xv\") pod \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.630874 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-scripts\") pod \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.630904 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-config-data\") pod \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.630964 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-logs\") pod \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.631002 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.631042 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-httpd-run\") pod \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.631127 4932 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-public-tls-certs\") pod \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.631145 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-combined-ca-bundle\") pod \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\" (UID: \"230dcb38-b2f4-45fa-91a9-46c11fc57e3d\") " Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.632849 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-logs" (OuterVolumeSpecName: "logs") pod "230dcb38-b2f4-45fa-91a9-46c11fc57e3d" (UID: "230dcb38-b2f4-45fa-91a9-46c11fc57e3d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.635286 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "230dcb38-b2f4-45fa-91a9-46c11fc57e3d" (UID: "230dcb38-b2f4-45fa-91a9-46c11fc57e3d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.655327 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "230dcb38-b2f4-45fa-91a9-46c11fc57e3d" (UID: "230dcb38-b2f4-45fa-91a9-46c11fc57e3d"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.655495 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-kube-api-access-kh2xv" (OuterVolumeSpecName: "kube-api-access-kh2xv") pod "230dcb38-b2f4-45fa-91a9-46c11fc57e3d" (UID: "230dcb38-b2f4-45fa-91a9-46c11fc57e3d"). InnerVolumeSpecName "kube-api-access-kh2xv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.658469 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-scripts" (OuterVolumeSpecName: "scripts") pod "230dcb38-b2f4-45fa-91a9-46c11fc57e3d" (UID: "230dcb38-b2f4-45fa-91a9-46c11fc57e3d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.699437 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-config-data" (OuterVolumeSpecName: "config-data") pod "230dcb38-b2f4-45fa-91a9-46c11fc57e3d" (UID: "230dcb38-b2f4-45fa-91a9-46c11fc57e3d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.715624 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "230dcb38-b2f4-45fa-91a9-46c11fc57e3d" (UID: "230dcb38-b2f4-45fa-91a9-46c11fc57e3d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.733073 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.734569 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.734610 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.734620 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.734632 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kh2xv\" (UniqueName: \"kubernetes.io/projected/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-kube-api-access-kh2xv\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.734642 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.734651 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.735271 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "230dcb38-b2f4-45fa-91a9-46c11fc57e3d" (UID: "230dcb38-b2f4-45fa-91a9-46c11fc57e3d"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.775125 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.836904 4932 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/230dcb38-b2f4-45fa-91a9-46c11fc57e3d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:07 crc kubenswrapper[4932]: I1125 09:10:07.836946 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.153919 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5584db9bdf-rzbj9" event={"ID":"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5","Type":"ContainerStarted","Data":"674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47"} Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.153999 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5584db9bdf-rzbj9" event={"ID":"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5","Type":"ContainerStarted","Data":"ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997"} Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.154095 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.172639 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4e0e2802-c55a-4f62-8279-c2d11b3d54ee","Type":"ContainerStarted","Data":"f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07"} Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.180857 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5584db9bdf-rzbj9" podStartSLOduration=3.180833301 podStartE2EDuration="3.180833301s" podCreationTimestamp="2025-11-25 09:10:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:10:08.17294061 +0000 UTC m=+1268.298970183" watchObservedRunningTime="2025-11-25 09:10:08.180833301 +0000 UTC m=+1268.306862864" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.186687 4932 generic.go:334] "Generic (PLEG): container finished" podID="230dcb38-b2f4-45fa-91a9-46c11fc57e3d" containerID="8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac" exitCode=0 Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.186741 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"230dcb38-b2f4-45fa-91a9-46c11fc57e3d","Type":"ContainerDied","Data":"8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac"} Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.186776 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.186817 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"230dcb38-b2f4-45fa-91a9-46c11fc57e3d","Type":"ContainerDied","Data":"90e3392ffc938fdbf1d276c76a105efda03fbd0a547fa3f55d4b60b7bee9c43e"} Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.186853 4932 scope.go:117] "RemoveContainer" containerID="8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.218624 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.229894 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.250440 4932 scope.go:117] "RemoveContainer" containerID="35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.254704 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:10:08 crc kubenswrapper[4932]: E1125 09:10:08.255078 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="230dcb38-b2f4-45fa-91a9-46c11fc57e3d" containerName="glance-log" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.255095 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="230dcb38-b2f4-45fa-91a9-46c11fc57e3d" containerName="glance-log" Nov 25 09:10:08 crc kubenswrapper[4932]: E1125 09:10:08.255118 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="230dcb38-b2f4-45fa-91a9-46c11fc57e3d" containerName="glance-httpd" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.255125 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="230dcb38-b2f4-45fa-91a9-46c11fc57e3d" containerName="glance-httpd" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.255347 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="230dcb38-b2f4-45fa-91a9-46c11fc57e3d" containerName="glance-log" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.255367 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="230dcb38-b2f4-45fa-91a9-46c11fc57e3d" containerName="glance-httpd" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.256298 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.267696 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.268331 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.290428 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.330047 4932 scope.go:117] "RemoveContainer" containerID="8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac" Nov 25 09:10:08 crc kubenswrapper[4932]: E1125 09:10:08.330982 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac\": container with ID starting with 8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac not found: ID does not exist" containerID="8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.331015 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac"} err="failed to get container status \"8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac\": rpc error: code = NotFound desc = could not find container \"8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac\": container with ID starting with 8454eedb57d35989bd54ff516035ccd8f6f92c19f9d7239c15de1be617bf5eac not found: ID does not exist" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.331039 4932 scope.go:117] "RemoveContainer" containerID="35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6" Nov 25 09:10:08 crc kubenswrapper[4932]: E1125 09:10:08.331495 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6\": container with ID starting with 35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6 not found: ID does not exist" containerID="35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.331517 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6"} err="failed to get container status \"35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6\": rpc error: code = NotFound desc = could not find container \"35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6\": container with ID starting with 35af0dc9e14120a08cf2e59476c55dfcc2cc213fea28241fec8354a750e64ea6 not found: ID does not exist" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.352945 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.352991 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-scripts\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.353428 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.353584 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.353665 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjrsk\" (UniqueName: \"kubernetes.io/projected/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-kube-api-access-fjrsk\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.353711 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-logs\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.353732 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-config-data\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.353783 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.456003 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.456377 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 
09:10:08.456407 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjrsk\" (UniqueName: \"kubernetes.io/projected/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-kube-api-access-fjrsk\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.456430 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-logs\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.456447 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-config-data\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.456468 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.456542 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.456562 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-scripts\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.456783 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.457026 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-logs\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.457552 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.463013 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-scripts\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.463291 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-config-data\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.463478 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.471458 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.481851 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjrsk\" (UniqueName: \"kubernetes.io/projected/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-kube-api-access-fjrsk\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.500571 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-external-api-0\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") " pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.603653 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:10:08 crc kubenswrapper[4932]: I1125 09:10:08.619361 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="230dcb38-b2f4-45fa-91a9-46c11fc57e3d" path="/var/lib/kubelet/pods/230dcb38-b2f4-45fa-91a9-46c11fc57e3d/volumes" Nov 25 09:10:09 crc kubenswrapper[4932]: I1125 09:10:09.118484 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:10:09 crc kubenswrapper[4932]: I1125 09:10:09.219713 4932 generic.go:334] "Generic (PLEG): container finished" podID="3b55fab6-584e-4098-beb0-be91c10e631f" containerID="b2c7e77789303154985b5d572e2e645dc59426801021e02f4bfe4ed70e08243e" exitCode=0 Nov 25 09:10:09 crc kubenswrapper[4932]: I1125 09:10:09.220811 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3b55fab6-584e-4098-beb0-be91c10e631f","Type":"ContainerDied","Data":"b2c7e77789303154985b5d572e2e645dc59426801021e02f4bfe4ed70e08243e"} Nov 25 09:10:09 crc kubenswrapper[4932]: I1125 09:10:09.242000 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:10:09 crc kubenswrapper[4932]: I1125 09:10:09.507681 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 09:10:09 crc kubenswrapper[4932]: I1125 09:10:09.603643 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:10:09 crc kubenswrapper[4932]: I1125 09:10:09.623402 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5cfb6b64bb-8mrcr" Nov 25 09:10:10 crc kubenswrapper[4932]: I1125 09:10:10.239018 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="e56b5819-c298-4533-9a40-7620e7af75c1" containerName="cinder-scheduler" containerID="cri-o://14d98a765c0db3f2216e071be3e5d12d51db407dc963fd836335a4812505b4c1" gracePeriod=30 Nov 25 09:10:10 crc kubenswrapper[4932]: I1125 09:10:10.239503 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="ceilometer-central-agent" containerID="cri-o://90198ee11c4363be4c2f7cf8d0f6522c825b02e7e3d2a9ef004e5d3d8c4035c0" gracePeriod=30 Nov 25 09:10:10 crc kubenswrapper[4932]: I1125 09:10:10.239594 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4e0e2802-c55a-4f62-8279-c2d11b3d54ee","Type":"ContainerStarted","Data":"8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec"} Nov 25 09:10:10 crc kubenswrapper[4932]: I1125 09:10:10.239941 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="e56b5819-c298-4533-9a40-7620e7af75c1" containerName="probe" containerID="cri-o://047371326a65cbcd66896fe69decc959c902a3259f649af6714073deae1e8854" gracePeriod=30 Nov 25 09:10:10 crc kubenswrapper[4932]: I1125 09:10:10.240021 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:10:10 crc kubenswrapper[4932]: I1125 09:10:10.240071 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="proxy-httpd" 
containerID="cri-o://8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec" gracePeriod=30 Nov 25 09:10:10 crc kubenswrapper[4932]: I1125 09:10:10.240127 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="sg-core" containerID="cri-o://f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07" gracePeriod=30 Nov 25 09:10:10 crc kubenswrapper[4932]: I1125 09:10:10.240176 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="ceilometer-notification-agent" containerID="cri-o://d9563a86e082e3947142338b6402582bf2ccd34b7bcd9b8859db1fcbfc676364" gracePeriod=30 Nov 25 09:10:10 crc kubenswrapper[4932]: I1125 09:10:10.280520 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.586969927 podStartE2EDuration="6.280499211s" podCreationTimestamp="2025-11-25 09:10:04 +0000 UTC" firstStartedPulling="2025-11-25 09:10:05.220486003 +0000 UTC m=+1265.346515556" lastFinishedPulling="2025-11-25 09:10:08.914015277 +0000 UTC m=+1269.040044840" observedRunningTime="2025-11-25 09:10:10.274936868 +0000 UTC m=+1270.400966431" watchObservedRunningTime="2025-11-25 09:10:10.280499211 +0000 UTC m=+1270.406528774" Nov 25 09:10:11 crc kubenswrapper[4932]: I1125 09:10:11.279031 4932 generic.go:334] "Generic (PLEG): container finished" podID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerID="8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec" exitCode=0 Nov 25 09:10:11 crc kubenswrapper[4932]: I1125 09:10:11.279361 4932 generic.go:334] "Generic (PLEG): container finished" podID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerID="f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07" exitCode=2 Nov 25 09:10:11 crc kubenswrapper[4932]: I1125 09:10:11.279370 4932 generic.go:334] "Generic (PLEG): container finished" podID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerID="d9563a86e082e3947142338b6402582bf2ccd34b7bcd9b8859db1fcbfc676364" exitCode=0 Nov 25 09:10:11 crc kubenswrapper[4932]: I1125 09:10:11.279096 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4e0e2802-c55a-4f62-8279-c2d11b3d54ee","Type":"ContainerDied","Data":"8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec"} Nov 25 09:10:11 crc kubenswrapper[4932]: I1125 09:10:11.279440 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4e0e2802-c55a-4f62-8279-c2d11b3d54ee","Type":"ContainerDied","Data":"f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07"} Nov 25 09:10:11 crc kubenswrapper[4932]: I1125 09:10:11.279454 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4e0e2802-c55a-4f62-8279-c2d11b3d54ee","Type":"ContainerDied","Data":"d9563a86e082e3947142338b6402582bf2ccd34b7bcd9b8859db1fcbfc676364"} Nov 25 09:10:11 crc kubenswrapper[4932]: I1125 09:10:11.285491 4932 generic.go:334] "Generic (PLEG): container finished" podID="e56b5819-c298-4533-9a40-7620e7af75c1" containerID="047371326a65cbcd66896fe69decc959c902a3259f649af6714073deae1e8854" exitCode=0 Nov 25 09:10:11 crc kubenswrapper[4932]: I1125 09:10:11.285542 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"e56b5819-c298-4533-9a40-7620e7af75c1","Type":"ContainerDied","Data":"047371326a65cbcd66896fe69decc959c902a3259f649af6714073deae1e8854"} Nov 25 09:10:11 crc kubenswrapper[4932]: I1125 09:10:11.969651 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 09:10:12 crc kubenswrapper[4932]: I1125 09:10:12.296403 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:10:12 crc kubenswrapper[4932]: I1125 09:10:12.382644 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b9b87645-8ngpb"] Nov 25 09:10:12 crc kubenswrapper[4932]: I1125 09:10:12.382920 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" podUID="2ea9c914-3d06-40d7-92f3-56f27f6c8900" containerName="dnsmasq-dns" containerID="cri-o://c4cf0d747fb095e351de373eafc0beb87d2b0826cebea5d997c430b78e69d249" gracePeriod=10 Nov 25 09:10:13 crc kubenswrapper[4932]: I1125 09:10:13.308835 4932 generic.go:334] "Generic (PLEG): container finished" podID="2ea9c914-3d06-40d7-92f3-56f27f6c8900" containerID="c4cf0d747fb095e351de373eafc0beb87d2b0826cebea5d997c430b78e69d249" exitCode=0 Nov 25 09:10:13 crc kubenswrapper[4932]: I1125 09:10:13.308879 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" event={"ID":"2ea9c914-3d06-40d7-92f3-56f27f6c8900","Type":"ContainerDied","Data":"c4cf0d747fb095e351de373eafc0beb87d2b0826cebea5d997c430b78e69d249"} Nov 25 09:10:13 crc kubenswrapper[4932]: I1125 09:10:13.843183 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" podUID="2ea9c914-3d06-40d7-92f3-56f27f6c8900" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.158:5353: connect: connection refused" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.333545 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"3b55fab6-584e-4098-beb0-be91c10e631f","Type":"ContainerDied","Data":"971e9287542eb70fedae2132b2c0628d0f534e0db33279cf709e480140028bbc"} Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.333850 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="971e9287542eb70fedae2132b2c0628d0f534e0db33279cf709e480140028bbc" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.347640 4932 generic.go:334] "Generic (PLEG): container finished" podID="e56b5819-c298-4533-9a40-7620e7af75c1" containerID="14d98a765c0db3f2216e071be3e5d12d51db407dc963fd836335a4812505b4c1" exitCode=0 Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.347712 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e56b5819-c298-4533-9a40-7620e7af75c1","Type":"ContainerDied","Data":"14d98a765c0db3f2216e071be3e5d12d51db407dc963fd836335a4812505b4c1"} Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.349625 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61","Type":"ContainerStarted","Data":"dd294829b7ac1e7bd9e05c8a705e4dc8b34a49faa43b91ea8147a1f67529b4da"} Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.363934 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.457133 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-combined-ca-bundle\") pod \"3b55fab6-584e-4098-beb0-be91c10e631f\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.457201 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xn2nf\" (UniqueName: \"kubernetes.io/projected/3b55fab6-584e-4098-beb0-be91c10e631f-kube-api-access-xn2nf\") pod \"3b55fab6-584e-4098-beb0-be91c10e631f\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.457244 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b55fab6-584e-4098-beb0-be91c10e631f-httpd-run\") pod \"3b55fab6-584e-4098-beb0-be91c10e631f\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.457306 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b55fab6-584e-4098-beb0-be91c10e631f-logs\") pod \"3b55fab6-584e-4098-beb0-be91c10e631f\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.457370 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"3b55fab6-584e-4098-beb0-be91c10e631f\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.457396 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-internal-tls-certs\") pod \"3b55fab6-584e-4098-beb0-be91c10e631f\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.457420 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-scripts\") pod \"3b55fab6-584e-4098-beb0-be91c10e631f\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.457483 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-config-data\") pod \"3b55fab6-584e-4098-beb0-be91c10e631f\" (UID: \"3b55fab6-584e-4098-beb0-be91c10e631f\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.458569 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b55fab6-584e-4098-beb0-be91c10e631f-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3b55fab6-584e-4098-beb0-be91c10e631f" (UID: "3b55fab6-584e-4098-beb0-be91c10e631f"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.458694 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b55fab6-584e-4098-beb0-be91c10e631f-logs" (OuterVolumeSpecName: "logs") pod "3b55fab6-584e-4098-beb0-be91c10e631f" (UID: "3b55fab6-584e-4098-beb0-be91c10e631f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.469932 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "3b55fab6-584e-4098-beb0-be91c10e631f" (UID: "3b55fab6-584e-4098-beb0-be91c10e631f"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.478341 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-scripts" (OuterVolumeSpecName: "scripts") pod "3b55fab6-584e-4098-beb0-be91c10e631f" (UID: "3b55fab6-584e-4098-beb0-be91c10e631f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.502749 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b55fab6-584e-4098-beb0-be91c10e631f-kube-api-access-xn2nf" (OuterVolumeSpecName: "kube-api-access-xn2nf") pod "3b55fab6-584e-4098-beb0-be91c10e631f" (UID: "3b55fab6-584e-4098-beb0-be91c10e631f"). InnerVolumeSpecName "kube-api-access-xn2nf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.502854 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3b55fab6-584e-4098-beb0-be91c10e631f" (UID: "3b55fab6-584e-4098-beb0-be91c10e631f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.536874 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3b55fab6-584e-4098-beb0-be91c10e631f" (UID: "3b55fab6-584e-4098-beb0-be91c10e631f"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.548369 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-config-data" (OuterVolumeSpecName: "config-data") pod "3b55fab6-584e-4098-beb0-be91c10e631f" (UID: "3b55fab6-584e-4098-beb0-be91c10e631f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.560403 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b55fab6-584e-4098-beb0-be91c10e631f-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.560464 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.560478 4932 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.560490 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.560501 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.560512 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b55fab6-584e-4098-beb0-be91c10e631f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.560525 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xn2nf\" (UniqueName: \"kubernetes.io/projected/3b55fab6-584e-4098-beb0-be91c10e631f-kube-api-access-xn2nf\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.560537 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3b55fab6-584e-4098-beb0-be91c10e631f-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.606019 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.661894 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.876951 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.883339 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.969707 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-combined-ca-bundle\") pod \"e56b5819-c298-4533-9a40-7620e7af75c1\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.969751 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-scripts\") pod \"e56b5819-c298-4533-9a40-7620e7af75c1\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.969784 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49bw5\" (UniqueName: \"kubernetes.io/projected/2ea9c914-3d06-40d7-92f3-56f27f6c8900-kube-api-access-49bw5\") pod \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.969815 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-config-data-custom\") pod \"e56b5819-c298-4533-9a40-7620e7af75c1\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.969843 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xl44w\" (UniqueName: \"kubernetes.io/projected/e56b5819-c298-4533-9a40-7620e7af75c1-kube-api-access-xl44w\") pod \"e56b5819-c298-4533-9a40-7620e7af75c1\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.969926 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-config-data\") pod \"e56b5819-c298-4533-9a40-7620e7af75c1\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.969945 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e56b5819-c298-4533-9a40-7620e7af75c1-etc-machine-id\") pod \"e56b5819-c298-4533-9a40-7620e7af75c1\" (UID: \"e56b5819-c298-4533-9a40-7620e7af75c1\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.969987 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-dns-swift-storage-0\") pod \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.970034 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-dns-svc\") pod \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.970050 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-ovsdbserver-nb\") pod 
\"2ea9c914-3d06-40d7-92f3-56f27f6c8900\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.970091 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-ovsdbserver-sb\") pod \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.970109 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-config\") pod \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\" (UID: \"2ea9c914-3d06-40d7-92f3-56f27f6c8900\") " Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.974436 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e56b5819-c298-4533-9a40-7620e7af75c1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "e56b5819-c298-4533-9a40-7620e7af75c1" (UID: "e56b5819-c298-4533-9a40-7620e7af75c1"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.982091 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-scripts" (OuterVolumeSpecName: "scripts") pod "e56b5819-c298-4533-9a40-7620e7af75c1" (UID: "e56b5819-c298-4533-9a40-7620e7af75c1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.983514 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ea9c914-3d06-40d7-92f3-56f27f6c8900-kube-api-access-49bw5" (OuterVolumeSpecName: "kube-api-access-49bw5") pod "2ea9c914-3d06-40d7-92f3-56f27f6c8900" (UID: "2ea9c914-3d06-40d7-92f3-56f27f6c8900"). InnerVolumeSpecName "kube-api-access-49bw5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.983986 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "e56b5819-c298-4533-9a40-7620e7af75c1" (UID: "e56b5819-c298-4533-9a40-7620e7af75c1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:15 crc kubenswrapper[4932]: I1125 09:10:15.987377 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e56b5819-c298-4533-9a40-7620e7af75c1-kube-api-access-xl44w" (OuterVolumeSpecName: "kube-api-access-xl44w") pod "e56b5819-c298-4533-9a40-7620e7af75c1" (UID: "e56b5819-c298-4533-9a40-7620e7af75c1"). InnerVolumeSpecName "kube-api-access-xl44w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.034339 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2ea9c914-3d06-40d7-92f3-56f27f6c8900" (UID: "2ea9c914-3d06-40d7-92f3-56f27f6c8900"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.035326 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-config" (OuterVolumeSpecName: "config") pod "2ea9c914-3d06-40d7-92f3-56f27f6c8900" (UID: "2ea9c914-3d06-40d7-92f3-56f27f6c8900"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.072399 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.072430 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49bw5\" (UniqueName: \"kubernetes.io/projected/2ea9c914-3d06-40d7-92f3-56f27f6c8900-kube-api-access-49bw5\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.072443 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.072454 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xl44w\" (UniqueName: \"kubernetes.io/projected/e56b5819-c298-4533-9a40-7620e7af75c1-kube-api-access-xl44w\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.072464 4932 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e56b5819-c298-4533-9a40-7620e7af75c1-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.072474 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.072484 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.080423 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2ea9c914-3d06-40d7-92f3-56f27f6c8900" (UID: "2ea9c914-3d06-40d7-92f3-56f27f6c8900"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.082648 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2ea9c914-3d06-40d7-92f3-56f27f6c8900" (UID: "2ea9c914-3d06-40d7-92f3-56f27f6c8900"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.089693 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2ea9c914-3d06-40d7-92f3-56f27f6c8900" (UID: "2ea9c914-3d06-40d7-92f3-56f27f6c8900"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.110507 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e56b5819-c298-4533-9a40-7620e7af75c1" (UID: "e56b5819-c298-4533-9a40-7620e7af75c1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.164343 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-config-data" (OuterVolumeSpecName: "config-data") pod "e56b5819-c298-4533-9a40-7620e7af75c1" (UID: "e56b5819-c298-4533-9a40-7620e7af75c1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.178958 4932 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.178999 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.179011 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2ea9c914-3d06-40d7-92f3-56f27f6c8900-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.179022 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.179033 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e56b5819-c298-4533-9a40-7620e7af75c1-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.364600 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6ktnm" event={"ID":"b9fc814a-d54a-4157-9257-db33b7734522","Type":"ContainerStarted","Data":"4864acfa3a39774595813e1aa545d79fe92ca72c92dee0bda77fccdc3c6b3214"} Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.367061 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e56b5819-c298-4533-9a40-7620e7af75c1","Type":"ContainerDied","Data":"21a25883869d7267ee4dd5386633b54ed802e007073c3a52c980dcb8741e692c"} Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.367078 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.367115 4932 scope.go:117] "RemoveContainer" containerID="047371326a65cbcd66896fe69decc959c902a3259f649af6714073deae1e8854" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.379526 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.381331 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b9b87645-8ngpb" event={"ID":"2ea9c914-3d06-40d7-92f3-56f27f6c8900","Type":"ContainerDied","Data":"77a9c3921441958cd006aeaac47456bd36d33f67f191ef4aaacb65589ae1bd34"} Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.381432 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.387175 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-6ktnm" podStartSLOduration=2.200740842 podStartE2EDuration="17.38715493s" podCreationTimestamp="2025-11-25 09:09:59 +0000 UTC" firstStartedPulling="2025-11-25 09:10:00.793570298 +0000 UTC m=+1260.919599871" lastFinishedPulling="2025-11-25 09:10:15.979984396 +0000 UTC m=+1276.106013959" observedRunningTime="2025-11-25 09:10:16.376558449 +0000 UTC m=+1276.502588012" watchObservedRunningTime="2025-11-25 09:10:16.38715493 +0000 UTC m=+1276.513184493" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.396155 4932 scope.go:117] "RemoveContainer" containerID="14d98a765c0db3f2216e071be3e5d12d51db407dc963fd836335a4812505b4c1" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.425780 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b9b87645-8ngpb"] Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.431538 4932 scope.go:117] "RemoveContainer" containerID="c4cf0d747fb095e351de373eafc0beb87d2b0826cebea5d997c430b78e69d249" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.439384 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8b9b87645-8ngpb"] Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.453259 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.494230 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.510821 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.515892 4932 scope.go:117] "RemoveContainer" containerID="cff02c37abc945af69175f5d6a5ffd27f5b1d2ec23a8b025ec27782a3980e13d" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.545694 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.621324 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ea9c914-3d06-40d7-92f3-56f27f6c8900" path="/var/lib/kubelet/pods/2ea9c914-3d06-40d7-92f3-56f27f6c8900/volumes" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.622021 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b55fab6-584e-4098-beb0-be91c10e631f" 
path="/var/lib/kubelet/pods/3b55fab6-584e-4098-beb0-be91c10e631f/volumes" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.624091 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e56b5819-c298-4533-9a40-7620e7af75c1" path="/var/lib/kubelet/pods/e56b5819-c298-4533-9a40-7620e7af75c1/volumes" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.625325 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:10:16 crc kubenswrapper[4932]: E1125 09:10:16.625602 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ea9c914-3d06-40d7-92f3-56f27f6c8900" containerName="dnsmasq-dns" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.625619 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ea9c914-3d06-40d7-92f3-56f27f6c8900" containerName="dnsmasq-dns" Nov 25 09:10:16 crc kubenswrapper[4932]: E1125 09:10:16.625646 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e56b5819-c298-4533-9a40-7620e7af75c1" containerName="probe" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.625655 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e56b5819-c298-4533-9a40-7620e7af75c1" containerName="probe" Nov 25 09:10:16 crc kubenswrapper[4932]: E1125 09:10:16.625673 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b55fab6-584e-4098-beb0-be91c10e631f" containerName="glance-log" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.625681 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b55fab6-584e-4098-beb0-be91c10e631f" containerName="glance-log" Nov 25 09:10:16 crc kubenswrapper[4932]: E1125 09:10:16.625714 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e56b5819-c298-4533-9a40-7620e7af75c1" containerName="cinder-scheduler" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.625722 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e56b5819-c298-4533-9a40-7620e7af75c1" containerName="cinder-scheduler" Nov 25 09:10:16 crc kubenswrapper[4932]: E1125 09:10:16.625735 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b55fab6-584e-4098-beb0-be91c10e631f" containerName="glance-httpd" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.625741 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b55fab6-584e-4098-beb0-be91c10e631f" containerName="glance-httpd" Nov 25 09:10:16 crc kubenswrapper[4932]: E1125 09:10:16.625762 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ea9c914-3d06-40d7-92f3-56f27f6c8900" containerName="init" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.625769 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ea9c914-3d06-40d7-92f3-56f27f6c8900" containerName="init" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.625970 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b55fab6-584e-4098-beb0-be91c10e631f" containerName="glance-log" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.626082 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e56b5819-c298-4533-9a40-7620e7af75c1" containerName="cinder-scheduler" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.626098 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b55fab6-584e-4098-beb0-be91c10e631f" containerName="glance-httpd" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.626113 4932 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2ea9c914-3d06-40d7-92f3-56f27f6c8900" containerName="dnsmasq-dns" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.626158 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e56b5819-c298-4533-9a40-7620e7af75c1" containerName="probe" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.627270 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.631572 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.631857 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.637995 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.640126 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.641889 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.645457 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.652675 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.708741 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.708834 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.708856 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-scripts\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.708912 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.708960 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " 
pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.709002 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.709027 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-config-data\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.709053 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5101ae2-5106-48c7-9116-4c0e5ededb84-logs\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.709073 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.709105 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gq7g\" (UniqueName: \"kubernetes.io/projected/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-kube-api-access-8gq7g\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.709139 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.709311 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c5101ae2-5106-48c7-9116-4c0e5ededb84-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.709410 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh8q9\" (UniqueName: \"kubernetes.io/projected/c5101ae2-5106-48c7-9116-4c0e5ededb84-kube-api-access-mh8q9\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.709438 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-internal-tls-certs\") pod \"glance-default-internal-api-0\" 
(UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.810826 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.810877 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-scripts\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.810911 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.810938 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.810977 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.810993 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-config-data\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.811011 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5101ae2-5106-48c7-9116-4c0e5ededb84-logs\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.811026 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.811049 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gq7g\" (UniqueName: \"kubernetes.io/projected/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-kube-api-access-8gq7g\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.811070 4932 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.811103 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c5101ae2-5106-48c7-9116-4c0e5ededb84-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.811150 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh8q9\" (UniqueName: \"kubernetes.io/projected/c5101ae2-5106-48c7-9116-4c0e5ededb84-kube-api-access-mh8q9\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.811169 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.811229 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.811395 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.813486 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.813697 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c5101ae2-5106-48c7-9116-4c0e5ededb84-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.814956 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5101ae2-5106-48c7-9116-4c0e5ededb84-logs\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.817425 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.817840 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.819701 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.820628 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.824694 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-scripts\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.825477 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-config-data\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.827118 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.832529 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.834682 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gq7g\" (UniqueName: \"kubernetes.io/projected/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-kube-api-access-8gq7g\") pod \"cinder-scheduler-0\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " pod="openstack/cinder-scheduler-0" Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.845337 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh8q9\" (UniqueName: \"kubernetes.io/projected/c5101ae2-5106-48c7-9116-4c0e5ededb84-kube-api-access-mh8q9\") pod \"glance-default-internal-api-0\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " pod="openstack/glance-default-internal-api-0" Nov 25 
Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.956343 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 09:10:16 crc kubenswrapper[4932]: I1125 09:10:16.969330 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 25 09:10:17 crc kubenswrapper[4932]: I1125 09:10:17.398624 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61","Type":"ContainerStarted","Data":"84c8cfeb381f864d67634b78621a1b7460c7087ecdf9baf7bdd83200605e31e2"}
Nov 25 09:10:17 crc kubenswrapper[4932]: I1125 09:10:17.533992 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 25 09:10:17 crc kubenswrapper[4932]: I1125 09:10:17.631234 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 09:10:18 crc kubenswrapper[4932]: I1125 09:10:18.432526 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c5101ae2-5106-48c7-9116-4c0e5ededb84","Type":"ContainerStarted","Data":"3d4d2ece1e5eef9d1d0e16758fced0df7cde0583ea5a26d7bbd9fa814e5ca952"}
Nov 25 09:10:18 crc kubenswrapper[4932]: I1125 09:10:18.432829 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c5101ae2-5106-48c7-9116-4c0e5ededb84","Type":"ContainerStarted","Data":"9100a82045755f862259439d50e1bed4ab6f7c4cc3ed16bd1ac86db7a21762e3"}
Nov 25 09:10:18 crc kubenswrapper[4932]: I1125 09:10:18.434908 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc","Type":"ContainerStarted","Data":"281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34"}
Nov 25 09:10:18 crc kubenswrapper[4932]: I1125 09:10:18.434940 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc","Type":"ContainerStarted","Data":"95363e93c3264c1a7c8cec0f0f8120329a0b0f2f0d924b428548af444ee5a9b7"}
Nov 25 09:10:18 crc kubenswrapper[4932]: I1125 09:10:18.439759 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61","Type":"ContainerStarted","Data":"b8682f71f2ee6925b54df3f64b25f4f743542faa8879099318a3b2e0226e6888"}
Nov 25 09:10:18 crc kubenswrapper[4932]: I1125 09:10:18.471551 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=10.471532173 podStartE2EDuration="10.471532173s" podCreationTimestamp="2025-11-25 09:10:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:10:18.461158938 +0000 UTC m=+1278.587188501" watchObservedRunningTime="2025-11-25 09:10:18.471532173 +0000 UTC m=+1278.597561736"
Nov 25 09:10:18 crc kubenswrapper[4932]: I1125 09:10:18.604846 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 25 09:10:18 crc kubenswrapper[4932]: I1125 09:10:18.605053 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 25 09:10:18 crc kubenswrapper[4932]: I1125 09:10:18.645544 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 25 09:10:18 crc kubenswrapper[4932]: I1125 09:10:18.663497 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 25 09:10:19 crc kubenswrapper[4932]: I1125 09:10:19.453039 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c5101ae2-5106-48c7-9116-4c0e5ededb84","Type":"ContainerStarted","Data":"72f71cf73b9865b04d4d3de5c8547c8ca66dceb1900d89f1ff42c5d833013afd"}
Nov 25 09:10:19 crc kubenswrapper[4932]: I1125 09:10:19.456513 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc","Type":"ContainerStarted","Data":"8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140"}
Nov 25 09:10:19 crc kubenswrapper[4932]: I1125 09:10:19.456647 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 09:10:19 crc kubenswrapper[4932]: I1125 09:10:19.456667 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 09:10:19 crc kubenswrapper[4932]: I1125 09:10:19.484002 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.48397698 podStartE2EDuration="3.48397698s" podCreationTimestamp="2025-11-25 09:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:10:19.47443315 +0000 UTC m=+1279.600462733" watchObservedRunningTime="2025-11-25 09:10:19.48397698 +0000 UTC m=+1279.610006543"
Nov 25 09:10:19 crc kubenswrapper[4932]: I1125 09:10:19.495329 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.4953061229999998 podStartE2EDuration="3.495306123s" podCreationTimestamp="2025-11-25 09:10:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:10:19.494102047 +0000 UTC m=+1279.620131620" watchObservedRunningTime="2025-11-25 09:10:19.495306123 +0000 UTC m=+1279.621335686"
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.196366 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-log-httpd\") pod \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.196436 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-config-data\") pod \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.196478 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-run-httpd\") pod \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.196505 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-combined-ca-bundle\") pod \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.196521 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-sg-core-conf-yaml\") pod \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.196544 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-scripts\") pod \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.196613 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdz72\" (UniqueName: \"kubernetes.io/projected/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-kube-api-access-bdz72\") pod \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.197663 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4e0e2802-c55a-4f62-8279-c2d11b3d54ee" (UID: "4e0e2802-c55a-4f62-8279-c2d11b3d54ee"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.197675 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4e0e2802-c55a-4f62-8279-c2d11b3d54ee" (UID: "4e0e2802-c55a-4f62-8279-c2d11b3d54ee"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.201825 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-kube-api-access-bdz72" (OuterVolumeSpecName: "kube-api-access-bdz72") pod "4e0e2802-c55a-4f62-8279-c2d11b3d54ee" (UID: "4e0e2802-c55a-4f62-8279-c2d11b3d54ee"). InnerVolumeSpecName "kube-api-access-bdz72". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.202362 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-scripts" (OuterVolumeSpecName: "scripts") pod "4e0e2802-c55a-4f62-8279-c2d11b3d54ee" (UID: "4e0e2802-c55a-4f62-8279-c2d11b3d54ee"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.225205 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4e0e2802-c55a-4f62-8279-c2d11b3d54ee" (UID: "4e0e2802-c55a-4f62-8279-c2d11b3d54ee"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.319742 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.319778 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.319792 4932 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.319807 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.319821 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdz72\" (UniqueName: \"kubernetes.io/projected/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-kube-api-access-bdz72\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.359377 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4e0e2802-c55a-4f62-8279-c2d11b3d54ee" (UID: "4e0e2802-c55a-4f62-8279-c2d11b3d54ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.422404 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-config-data" (OuterVolumeSpecName: "config-data") pod "4e0e2802-c55a-4f62-8279-c2d11b3d54ee" (UID: "4e0e2802-c55a-4f62-8279-c2d11b3d54ee"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.423779 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-config-data\") pod \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\" (UID: \"4e0e2802-c55a-4f62-8279-c2d11b3d54ee\") " Nov 25 09:10:21 crc kubenswrapper[4932]: W1125 09:10:21.424101 4932 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/4e0e2802-c55a-4f62-8279-c2d11b3d54ee/volumes/kubernetes.io~secret/config-data Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.424124 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-config-data" (OuterVolumeSpecName: "config-data") pod "4e0e2802-c55a-4f62-8279-c2d11b3d54ee" (UID: "4e0e2802-c55a-4f62-8279-c2d11b3d54ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.424360 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.424384 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e0e2802-c55a-4f62-8279-c2d11b3d54ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.480238 4932 generic.go:334] "Generic (PLEG): container finished" podID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerID="90198ee11c4363be4c2f7cf8d0f6522c825b02e7e3d2a9ef004e5d3d8c4035c0" exitCode=0 Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.480299 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4e0e2802-c55a-4f62-8279-c2d11b3d54ee","Type":"ContainerDied","Data":"90198ee11c4363be4c2f7cf8d0f6522c825b02e7e3d2a9ef004e5d3d8c4035c0"} Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.480336 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4e0e2802-c55a-4f62-8279-c2d11b3d54ee","Type":"ContainerDied","Data":"6f04e942706791ae76b7cf1dc6e1b30fbd8437d9111f17878851deead2680337"} Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.480357 4932 scope.go:117] "RemoveContainer" containerID="8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.480685 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.516747 4932 scope.go:117] "RemoveContainer" containerID="f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.536286 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.551448 4932 scope.go:117] "RemoveContainer" containerID="d9563a86e082e3947142338b6402582bf2ccd34b7bcd9b8859db1fcbfc676364" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.555003 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.572154 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:21 crc kubenswrapper[4932]: E1125 09:10:21.576134 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="ceilometer-notification-agent" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.576205 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="ceilometer-notification-agent" Nov 25 09:10:21 crc kubenswrapper[4932]: E1125 09:10:21.576224 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="ceilometer-central-agent" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.576231 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="ceilometer-central-agent" Nov 25 09:10:21 crc kubenswrapper[4932]: E1125 09:10:21.576268 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="sg-core" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.576274 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="sg-core" Nov 25 09:10:21 crc kubenswrapper[4932]: E1125 09:10:21.576285 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="proxy-httpd" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.576291 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="proxy-httpd" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.577424 4932 scope.go:117] "RemoveContainer" containerID="90198ee11c4363be4c2f7cf8d0f6522c825b02e7e3d2a9ef004e5d3d8c4035c0" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.592424 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="proxy-httpd" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.592474 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="ceilometer-notification-agent" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.592517 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="ceilometer-central-agent" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.592533 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" containerName="sg-core" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.598624 4932 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.601257 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.601369 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.617243 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.618141 4932 scope.go:117] "RemoveContainer" containerID="8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec" Nov 25 09:10:21 crc kubenswrapper[4932]: E1125 09:10:21.625096 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec\": container with ID starting with 8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec not found: ID does not exist" containerID="8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.625134 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec"} err="failed to get container status \"8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec\": rpc error: code = NotFound desc = could not find container \"8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec\": container with ID starting with 8c6fc570c430b9a1f78eab9a67f7565ff7e4a4e43947525d81a276769e4abeec not found: ID does not exist" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.625157 4932 scope.go:117] "RemoveContainer" containerID="f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07" Nov 25 09:10:21 crc kubenswrapper[4932]: E1125 09:10:21.626056 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07\": container with ID starting with f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07 not found: ID does not exist" containerID="f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.626079 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07"} err="failed to get container status \"f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07\": rpc error: code = NotFound desc = could not find container \"f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07\": container with ID starting with f1ccdc67b927cb0a1fba085ffe1d9c4654d8a4a5ffcc82c19a21a63474d52e07 not found: ID does not exist" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.626094 4932 scope.go:117] "RemoveContainer" containerID="d9563a86e082e3947142338b6402582bf2ccd34b7bcd9b8859db1fcbfc676364" Nov 25 09:10:21 crc kubenswrapper[4932]: E1125 09:10:21.626752 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9563a86e082e3947142338b6402582bf2ccd34b7bcd9b8859db1fcbfc676364\": container with ID starting with 
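The "ContainerStatus from runtime service failed ... NotFound" / "DeleteContainer returned error" pairs above are benign: the containers were already removed, so a second RemoveContainer pass gets a gRPC NotFound, which is logged but amounts to "already gone". A toy Go model of that NotFound-tolerant delete follows (illustrative only; errNotFound, runtime, and remove are hypothetical stand-ins for the CRI gRPC path):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("code = NotFound")

type containerRuntime struct{ containers map[string]bool }

func (r *containerRuntime) remove(id string) error {
	if !r.containers[id] {
		return fmt.Errorf("could not find container %q: %w", id, errNotFound)
	}
	delete(r.containers, id)
	return nil
}

func main() {
	rt := &containerRuntime{containers: map[string]bool{"8c6fc570c430": true}}
	for i := 0; i < 2; i++ {
		if err := rt.remove("8c6fc570c430"); err != nil {
			if errors.Is(err, errNotFound) {
				fmt.Println("already removed:", err) // second pass lands here
				continue
			}
			panic(err)
		}
		fmt.Println("removed")
	}
}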
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.729648 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-log-httpd\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.729750 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxqxs\" (UniqueName: \"kubernetes.io/projected/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-kube-api-access-vxqxs\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.729815 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.730164 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-scripts\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.730226 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-config-data\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.730258 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.730307 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-run-httpd\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.831964 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-scripts\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.832014 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-config-data\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.832037 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.832060 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-run-httpd\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.832110 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-log-httpd\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.832827 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxqxs\" (UniqueName: \"kubernetes.io/projected/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-kube-api-access-vxqxs\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.832868 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.833472 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-log-httpd\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.833618 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-run-httpd\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.837370 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-scripts\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.841762 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.850427 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-config-data\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.853449 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.855604 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxqxs\" (UniqueName: \"kubernetes.io/projected/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-kube-api-access-vxqxs\") pod \"ceilometer-0\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " pod="openstack/ceilometer-0"
Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.926750 4932 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:21 crc kubenswrapper[4932]: I1125 09:10:21.971979 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 09:10:22 crc kubenswrapper[4932]: I1125 09:10:22.403050 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:22 crc kubenswrapper[4932]: W1125 09:10:22.408668 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod05af81ac_87a7_40b4_8ba7_2212c06a1d9a.slice/crio-5f85557be8bda38d832dc66ea9d52690ccdfd3140108d510b8198d8541c04ceb WatchSource:0}: Error finding container 5f85557be8bda38d832dc66ea9d52690ccdfd3140108d510b8198d8541c04ceb: Status 404 returned error can't find the container with id 5f85557be8bda38d832dc66ea9d52690ccdfd3140108d510b8198d8541c04ceb Nov 25 09:10:22 crc kubenswrapper[4932]: I1125 09:10:22.491610 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"05af81ac-87a7-40b4-8ba7-2212c06a1d9a","Type":"ContainerStarted","Data":"5f85557be8bda38d832dc66ea9d52690ccdfd3140108d510b8198d8541c04ceb"} Nov 25 09:10:22 crc kubenswrapper[4932]: I1125 09:10:22.619563 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e0e2802-c55a-4f62-8279-c2d11b3d54ee" path="/var/lib/kubelet/pods/4e0e2802-c55a-4f62-8279-c2d11b3d54ee/volumes" Nov 25 09:10:23 crc kubenswrapper[4932]: I1125 09:10:23.505749 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"05af81ac-87a7-40b4-8ba7-2212c06a1d9a","Type":"ContainerStarted","Data":"9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693"} Nov 25 09:10:24 crc kubenswrapper[4932]: I1125 09:10:24.516454 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"05af81ac-87a7-40b4-8ba7-2212c06a1d9a","Type":"ContainerStarted","Data":"60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486"} Nov 25 09:10:24 crc kubenswrapper[4932]: I1125 09:10:24.517084 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"05af81ac-87a7-40b4-8ba7-2212c06a1d9a","Type":"ContainerStarted","Data":"8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b"} Nov 25 09:10:26 crc kubenswrapper[4932]: I1125 09:10:26.535582 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"05af81ac-87a7-40b4-8ba7-2212c06a1d9a","Type":"ContainerStarted","Data":"be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664"} Nov 25 09:10:26 crc kubenswrapper[4932]: I1125 09:10:26.537264 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:10:26 crc kubenswrapper[4932]: I1125 09:10:26.566660 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.140304313 podStartE2EDuration="5.566632899s" podCreationTimestamp="2025-11-25 09:10:21 +0000 UTC" firstStartedPulling="2025-11-25 09:10:22.410934939 +0000 UTC m=+1282.536964502" lastFinishedPulling="2025-11-25 09:10:25.837263525 +0000 UTC m=+1285.963293088" observedRunningTime="2025-11-25 09:10:26.556845683 +0000 UTC m=+1286.682875246" watchObservedRunningTime="2025-11-25 09:10:26.566632899 +0000 UTC m=+1286.692662462" Nov 25 09:10:26 crc kubenswrapper[4932]: I1125 09:10:26.957914 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 09:10:26 crc kubenswrapper[4932]: I1125 09:10:26.959681 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 09:10:26 crc kubenswrapper[4932]: I1125 09:10:26.997649 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 09:10:27 crc kubenswrapper[4932]: I1125 09:10:27.025983 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 09:10:27 crc kubenswrapper[4932]: I1125 09:10:27.210883 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 09:10:27 crc kubenswrapper[4932]: I1125 09:10:27.543392 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 09:10:27 crc kubenswrapper[4932]: I1125 09:10:27.543456 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 09:10:29 crc kubenswrapper[4932]: I1125 09:10:29.481084 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 09:10:29 crc kubenswrapper[4932]: I1125 09:10:29.577995 4932 generic.go:334] "Generic (PLEG): container finished" podID="b9fc814a-d54a-4157-9257-db33b7734522" containerID="4864acfa3a39774595813e1aa545d79fe92ca72c92dee0bda77fccdc3c6b3214" exitCode=0 Nov 25 09:10:29 crc kubenswrapper[4932]: I1125 09:10:29.578129 4932 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 09:10:29 crc kubenswrapper[4932]: I1125 09:10:29.578500 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6ktnm" event={"ID":"b9fc814a-d54a-4157-9257-db33b7734522","Type":"ContainerDied","Data":"4864acfa3a39774595813e1aa545d79fe92ca72c92dee0bda77fccdc3c6b3214"} Nov 25 09:10:29 crc kubenswrapper[4932]: I1125 09:10:29.631939 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 09:10:30 crc kubenswrapper[4932]: I1125 09:10:30.175507 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:30 crc kubenswrapper[4932]: I1125 09:10:30.176161 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="ceilometer-central-agent" containerID="cri-o://9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693" gracePeriod=30 Nov 25 09:10:30 crc kubenswrapper[4932]: I1125 09:10:30.176335 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="proxy-httpd" containerID="cri-o://be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664" gracePeriod=30 Nov 25 09:10:30 crc kubenswrapper[4932]: I1125 09:10:30.176391 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="sg-core" containerID="cri-o://60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486" gracePeriod=30 Nov 25 09:10:30 crc kubenswrapper[4932]: I1125 09:10:30.176434 4932 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/ceilometer-0" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="ceilometer-notification-agent" containerID="cri-o://8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b" gracePeriod=30 Nov 25 09:10:30 crc kubenswrapper[4932]: I1125 09:10:30.599619 4932 generic.go:334] "Generic (PLEG): container finished" podID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerID="be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664" exitCode=0 Nov 25 09:10:30 crc kubenswrapper[4932]: I1125 09:10:30.599936 4932 generic.go:334] "Generic (PLEG): container finished" podID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerID="60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486" exitCode=2 Nov 25 09:10:30 crc kubenswrapper[4932]: I1125 09:10:30.599798 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"05af81ac-87a7-40b4-8ba7-2212c06a1d9a","Type":"ContainerDied","Data":"be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664"} Nov 25 09:10:30 crc kubenswrapper[4932]: I1125 09:10:30.600174 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"05af81ac-87a7-40b4-8ba7-2212c06a1d9a","Type":"ContainerDied","Data":"60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486"} Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.062571 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.072678 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133099 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-run-httpd\") pod \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133148 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-scripts\") pod \"b9fc814a-d54a-4157-9257-db33b7734522\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133182 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-sg-core-conf-yaml\") pod \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133248 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-config-data\") pod \"b9fc814a-d54a-4157-9257-db33b7734522\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133273 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-config-data\") pod \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133355 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-scripts\") pod \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133442 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fg222\" (UniqueName: \"kubernetes.io/projected/b9fc814a-d54a-4157-9257-db33b7734522-kube-api-access-fg222\") pod \"b9fc814a-d54a-4157-9257-db33b7734522\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133475 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-combined-ca-bundle\") pod \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133499 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-combined-ca-bundle\") pod \"b9fc814a-d54a-4157-9257-db33b7734522\" (UID: \"b9fc814a-d54a-4157-9257-db33b7734522\") " Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133543 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxqxs\" (UniqueName: \"kubernetes.io/projected/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-kube-api-access-vxqxs\") pod \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133579 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-log-httpd\") pod \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\" (UID: \"05af81ac-87a7-40b4-8ba7-2212c06a1d9a\") " Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133720 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "05af81ac-87a7-40b4-8ba7-2212c06a1d9a" (UID: "05af81ac-87a7-40b4-8ba7-2212c06a1d9a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.133999 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.134398 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "05af81ac-87a7-40b4-8ba7-2212c06a1d9a" (UID: "05af81ac-87a7-40b4-8ba7-2212c06a1d9a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.147665 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9fc814a-d54a-4157-9257-db33b7734522-kube-api-access-fg222" (OuterVolumeSpecName: "kube-api-access-fg222") pod "b9fc814a-d54a-4157-9257-db33b7734522" (UID: "b9fc814a-d54a-4157-9257-db33b7734522"). InnerVolumeSpecName "kube-api-access-fg222". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.147729 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-kube-api-access-vxqxs" (OuterVolumeSpecName: "kube-api-access-vxqxs") pod "05af81ac-87a7-40b4-8ba7-2212c06a1d9a" (UID: "05af81ac-87a7-40b4-8ba7-2212c06a1d9a"). InnerVolumeSpecName "kube-api-access-vxqxs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.148508 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-scripts" (OuterVolumeSpecName: "scripts") pod "05af81ac-87a7-40b4-8ba7-2212c06a1d9a" (UID: "05af81ac-87a7-40b4-8ba7-2212c06a1d9a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.150701 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-scripts" (OuterVolumeSpecName: "scripts") pod "b9fc814a-d54a-4157-9257-db33b7734522" (UID: "b9fc814a-d54a-4157-9257-db33b7734522"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.170308 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-config-data" (OuterVolumeSpecName: "config-data") pod "b9fc814a-d54a-4157-9257-db33b7734522" (UID: "b9fc814a-d54a-4157-9257-db33b7734522"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.179596 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b9fc814a-d54a-4157-9257-db33b7734522" (UID: "b9fc814a-d54a-4157-9257-db33b7734522"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.188536 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "05af81ac-87a7-40b4-8ba7-2212c06a1d9a" (UID: "05af81ac-87a7-40b4-8ba7-2212c06a1d9a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.224436 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "05af81ac-87a7-40b4-8ba7-2212c06a1d9a" (UID: "05af81ac-87a7-40b4-8ba7-2212c06a1d9a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.237157 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fg222\" (UniqueName: \"kubernetes.io/projected/b9fc814a-d54a-4157-9257-db33b7734522-kube-api-access-fg222\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.237230 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.237250 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.237265 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxqxs\" (UniqueName: \"kubernetes.io/projected/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-kube-api-access-vxqxs\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.237281 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.237296 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.237310 4932 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.237324 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9fc814a-d54a-4157-9257-db33b7734522-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.237341 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.256479 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-config-data" (OuterVolumeSpecName: "config-data") pod "05af81ac-87a7-40b4-8ba7-2212c06a1d9a" (UID: "05af81ac-87a7-40b4-8ba7-2212c06a1d9a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.338435 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05af81ac-87a7-40b4-8ba7-2212c06a1d9a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.611310 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-6ktnm" event={"ID":"b9fc814a-d54a-4157-9257-db33b7734522","Type":"ContainerDied","Data":"2c1daf48829f22373c8ef3759696d4eb5596903a7ebdc3056be63c2cb907231c"} Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.611351 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c1daf48829f22373c8ef3759696d4eb5596903a7ebdc3056be63c2cb907231c" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.611348 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-6ktnm" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.615238 4932 generic.go:334] "Generic (PLEG): container finished" podID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerID="8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b" exitCode=0 Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.615266 4932 generic.go:334] "Generic (PLEG): container finished" podID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerID="9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693" exitCode=0 Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.615288 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"05af81ac-87a7-40b4-8ba7-2212c06a1d9a","Type":"ContainerDied","Data":"8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b"} Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.615314 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.615336 4932 scope.go:117] "RemoveContainer" containerID="be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.615322 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"05af81ac-87a7-40b4-8ba7-2212c06a1d9a","Type":"ContainerDied","Data":"9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693"} Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.615491 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"05af81ac-87a7-40b4-8ba7-2212c06a1d9a","Type":"ContainerDied","Data":"5f85557be8bda38d832dc66ea9d52690ccdfd3140108d510b8198d8541c04ceb"} Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.644549 4932 scope.go:117] "RemoveContainer" containerID="60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.662482 4932 scope.go:117] "RemoveContainer" containerID="8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.684084 4932 scope.go:117] "RemoveContainer" containerID="9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.684301 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.700262 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.712625 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:31 crc kubenswrapper[4932]: E1125 09:10:31.716143 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9fc814a-d54a-4157-9257-db33b7734522" containerName="nova-cell0-conductor-db-sync" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.716171 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9fc814a-d54a-4157-9257-db33b7734522" containerName="nova-cell0-conductor-db-sync" Nov 25 09:10:31 crc kubenswrapper[4932]: E1125 09:10:31.716228 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="ceilometer-notification-agent" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.716235 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="ceilometer-notification-agent" Nov 25 09:10:31 crc kubenswrapper[4932]: E1125 09:10:31.716248 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="sg-core" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.716255 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="sg-core" Nov 25 09:10:31 crc kubenswrapper[4932]: E1125 09:10:31.716275 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="ceilometer-central-agent" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.716282 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="ceilometer-central-agent" Nov 25 09:10:31 crc kubenswrapper[4932]: E1125 09:10:31.716298 4932 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="proxy-httpd" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.716304 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="proxy-httpd" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.717083 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="proxy-httpd" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.717103 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9fc814a-d54a-4157-9257-db33b7734522" containerName="nova-cell0-conductor-db-sync" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.717127 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="ceilometer-central-agent" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.717145 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="ceilometer-notification-agent" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.717157 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" containerName="sg-core" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.718931 4932 scope.go:117] "RemoveContainer" containerID="be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.724384 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.727957 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.729090 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.736698 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.751564 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:10:31 crc kubenswrapper[4932]: E1125 09:10:31.770964 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664\": container with ID starting with be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664 not found: ID does not exist" containerID="be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.771096 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.771269 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664"} err="failed to get container status \"be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664\": rpc error: code = NotFound desc = could not find container \"be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664\": container with ID starting with be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664 not found: ID does not exist" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.771771 4932 scope.go:117] "RemoveContainer" containerID="60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.774640 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-tp97l" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.775050 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 09:10:31 crc kubenswrapper[4932]: E1125 09:10:31.775684 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486\": container with ID starting with 60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486 not found: ID does not exist" containerID="60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.775735 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486"} err="failed to get container status \"60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486\": rpc error: code = NotFound desc = could not find container \"60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486\": container with ID starting with 60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486 not found: ID does not exist" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.775766 4932 scope.go:117] "RemoveContainer" containerID="8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b" Nov 25 09:10:31 crc kubenswrapper[4932]: E1125 09:10:31.778060 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b\": container with ID starting with 8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b not found: ID does not exist" containerID="8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.778104 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b"} err="failed to get container status \"8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b\": rpc error: code = NotFound desc = could not find container \"8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b\": container with ID starting with 8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b not found: ID does not exist" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.778122 4932 
scope.go:117] "RemoveContainer" containerID="9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693" Nov 25 09:10:31 crc kubenswrapper[4932]: E1125 09:10:31.778581 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693\": container with ID starting with 9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693 not found: ID does not exist" containerID="9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.778618 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693"} err="failed to get container status \"9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693\": rpc error: code = NotFound desc = could not find container \"9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693\": container with ID starting with 9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693 not found: ID does not exist" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.778646 4932 scope.go:117] "RemoveContainer" containerID="be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.778930 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664"} err="failed to get container status \"be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664\": rpc error: code = NotFound desc = could not find container \"be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664\": container with ID starting with be04441435a36787ed388747b63d51538fbdfc62a5c86718fbd10337290ae664 not found: ID does not exist" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.778947 4932 scope.go:117] "RemoveContainer" containerID="60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.779205 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486"} err="failed to get container status \"60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486\": rpc error: code = NotFound desc = could not find container \"60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486\": container with ID starting with 60a0f531fbc13cb4a9e110c387001cfc0e9292e0a7e254c9c32f0e0d73bf4486 not found: ID does not exist" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.779225 4932 scope.go:117] "RemoveContainer" containerID="8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.779522 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b"} err="failed to get container status \"8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b\": rpc error: code = NotFound desc = could not find container \"8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b\": container with ID starting with 8bda06e2c007018011df37eb23a1d3af5f13b240a1e9e8131ec53c5a5c1ed60b not found: ID does not exist" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.779547 4932 
scope.go:117] "RemoveContainer" containerID="9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.779756 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693"} err="failed to get container status \"9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693\": rpc error: code = NotFound desc = could not find container \"9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693\": container with ID starting with 9e618370ba7ec8fd464d7646ab0a525d44647d4e5bf40b3b3e26807d4c65c693 not found: ID does not exist" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.781038 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.850329 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e381bcc8-6d35-4692-b76a-a28541c6460a-run-httpd\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.850390 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.850412 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gx7ts\" (UniqueName: \"kubernetes.io/projected/e381bcc8-6d35-4692-b76a-a28541c6460a-kube-api-access-gx7ts\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.850499 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-scripts\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.850577 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.850667 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f961a8-19e0-4902-9519-984f0d7bd4cd-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"06f961a8-19e0-4902-9519-984f0d7bd4cd\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.850893 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc966\" (UniqueName: \"kubernetes.io/projected/06f961a8-19e0-4902-9519-984f0d7bd4cd-kube-api-access-vc966\") pod \"nova-cell0-conductor-0\" (UID: \"06f961a8-19e0-4902-9519-984f0d7bd4cd\") " pod="openstack/nova-cell0-conductor-0" Nov 25 
09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.850963 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e381bcc8-6d35-4692-b76a-a28541c6460a-log-httpd\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.851026 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-config-data\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.851042 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06f961a8-19e0-4902-9519-984f0d7bd4cd-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"06f961a8-19e0-4902-9519-984f0d7bd4cd\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.951991 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.952041 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gx7ts\" (UniqueName: \"kubernetes.io/projected/e381bcc8-6d35-4692-b76a-a28541c6460a-kube-api-access-gx7ts\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.952069 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-scripts\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.952964 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.953012 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f961a8-19e0-4902-9519-984f0d7bd4cd-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"06f961a8-19e0-4902-9519-984f0d7bd4cd\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.953100 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc966\" (UniqueName: \"kubernetes.io/projected/06f961a8-19e0-4902-9519-984f0d7bd4cd-kube-api-access-vc966\") pod \"nova-cell0-conductor-0\" (UID: \"06f961a8-19e0-4902-9519-984f0d7bd4cd\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.953132 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/e381bcc8-6d35-4692-b76a-a28541c6460a-log-httpd\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.953165 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-config-data\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.953180 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06f961a8-19e0-4902-9519-984f0d7bd4cd-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"06f961a8-19e0-4902-9519-984f0d7bd4cd\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.953231 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e381bcc8-6d35-4692-b76a-a28541c6460a-run-httpd\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.953606 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e381bcc8-6d35-4692-b76a-a28541c6460a-run-httpd\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.953664 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e381bcc8-6d35-4692-b76a-a28541c6460a-log-httpd\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.958702 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-scripts\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.961826 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06f961a8-19e0-4902-9519-984f0d7bd4cd-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"06f961a8-19e0-4902-9519-984f0d7bd4cd\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.961883 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f961a8-19e0-4902-9519-984f0d7bd4cd-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"06f961a8-19e0-4902-9519-984f0d7bd4cd\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.961891 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.962396 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-config-data\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.967894 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.969555 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gx7ts\" (UniqueName: \"kubernetes.io/projected/e381bcc8-6d35-4692-b76a-a28541c6460a-kube-api-access-gx7ts\") pod \"ceilometer-0\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " pod="openstack/ceilometer-0" Nov 25 09:10:31 crc kubenswrapper[4932]: I1125 09:10:31.981428 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc966\" (UniqueName: \"kubernetes.io/projected/06f961a8-19e0-4902-9519-984f0d7bd4cd-kube-api-access-vc966\") pod \"nova-cell0-conductor-0\" (UID: \"06f961a8-19e0-4902-9519-984f0d7bd4cd\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:10:32 crc kubenswrapper[4932]: I1125 09:10:32.045905 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:10:32 crc kubenswrapper[4932]: I1125 09:10:32.046814 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 09:10:32 crc kubenswrapper[4932]: I1125 09:10:32.071069 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:32 crc kubenswrapper[4932]: I1125 09:10:32.133271 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:32 crc kubenswrapper[4932]: I1125 09:10:32.385672 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:32 crc kubenswrapper[4932]: I1125 09:10:32.585077 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:10:32 crc kubenswrapper[4932]: W1125 09:10:32.592154 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod06f961a8_19e0_4902_9519_984f0d7bd4cd.slice/crio-4a64e4864bb753982ace7144ddfc8e48f6f8490c9332c814afa38126e5999f9c WatchSource:0}: Error finding container 4a64e4864bb753982ace7144ddfc8e48f6f8490c9332c814afa38126e5999f9c: Status 404 returned error can't find the container with id 4a64e4864bb753982ace7144ddfc8e48f6f8490c9332c814afa38126e5999f9c Nov 25 09:10:32 crc kubenswrapper[4932]: I1125 09:10:32.622822 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05af81ac-87a7-40b4-8ba7-2212c06a1d9a" path="/var/lib/kubelet/pods/05af81ac-87a7-40b4-8ba7-2212c06a1d9a/volumes" Nov 25 09:10:32 crc kubenswrapper[4932]: I1125 09:10:32.630315 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:32 crc kubenswrapper[4932]: I1125 09:10:32.630405 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"06f961a8-19e0-4902-9519-984f0d7bd4cd","Type":"ContainerStarted","Data":"4a64e4864bb753982ace7144ddfc8e48f6f8490c9332c814afa38126e5999f9c"} Nov 25 09:10:33 crc kubenswrapper[4932]: 
I1125 09:10:33.265118 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.396091 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-combined-ca-bundle\") pod \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.396233 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-etc-machine-id\") pod \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.396338 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-config-data\") pod \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.396394 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlx26\" (UniqueName: \"kubernetes.io/projected/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-kube-api-access-hlx26\") pod \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.396469 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-logs\") pod \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.396469 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "3637f5df-c0aa-4cf4-9885-4a0e63886fb6" (UID: "3637f5df-c0aa-4cf4-9885-4a0e63886fb6"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.396507 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-scripts\") pod \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.396535 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-config-data-custom\") pod \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\" (UID: \"3637f5df-c0aa-4cf4-9885-4a0e63886fb6\") " Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.397003 4932 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.397284 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-logs" (OuterVolumeSpecName: "logs") pod "3637f5df-c0aa-4cf4-9885-4a0e63886fb6" (UID: "3637f5df-c0aa-4cf4-9885-4a0e63886fb6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.400165 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-scripts" (OuterVolumeSpecName: "scripts") pod "3637f5df-c0aa-4cf4-9885-4a0e63886fb6" (UID: "3637f5df-c0aa-4cf4-9885-4a0e63886fb6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.401006 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-kube-api-access-hlx26" (OuterVolumeSpecName: "kube-api-access-hlx26") pod "3637f5df-c0aa-4cf4-9885-4a0e63886fb6" (UID: "3637f5df-c0aa-4cf4-9885-4a0e63886fb6"). InnerVolumeSpecName "kube-api-access-hlx26". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.401782 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3637f5df-c0aa-4cf4-9885-4a0e63886fb6" (UID: "3637f5df-c0aa-4cf4-9885-4a0e63886fb6"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.445381 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3637f5df-c0aa-4cf4-9885-4a0e63886fb6" (UID: "3637f5df-c0aa-4cf4-9885-4a0e63886fb6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.464917 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-config-data" (OuterVolumeSpecName: "config-data") pod "3637f5df-c0aa-4cf4-9885-4a0e63886fb6" (UID: "3637f5df-c0aa-4cf4-9885-4a0e63886fb6"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.499157 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlx26\" (UniqueName: \"kubernetes.io/projected/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-kube-api-access-hlx26\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.499215 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.499256 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.499271 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.499283 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.499296 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3637f5df-c0aa-4cf4-9885-4a0e63886fb6-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.649716 4932 generic.go:334] "Generic (PLEG): container finished" podID="3637f5df-c0aa-4cf4-9885-4a0e63886fb6" containerID="4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2" exitCode=137 Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.649771 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"3637f5df-c0aa-4cf4-9885-4a0e63886fb6","Type":"ContainerDied","Data":"4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2"} Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.649802 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"3637f5df-c0aa-4cf4-9885-4a0e63886fb6","Type":"ContainerDied","Data":"be3d744105a5bc3d322d1ce2ae81aa5f091bfa7074c8aea77fd206de4c3e6533"} Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.649822 4932 scope.go:117] "RemoveContainer" containerID="4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.649922 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.662084 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e381bcc8-6d35-4692-b76a-a28541c6460a","Type":"ContainerStarted","Data":"b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b"} Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.662140 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e381bcc8-6d35-4692-b76a-a28541c6460a","Type":"ContainerStarted","Data":"844cefb582fcaf48d19dc6063778e8cfa4defad23c3b7fc92e190e9e64ebea8f"} Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.664742 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"06f961a8-19e0-4902-9519-984f0d7bd4cd","Type":"ContainerStarted","Data":"9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff"} Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.664892 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="06f961a8-19e0-4902-9519-984f0d7bd4cd" containerName="nova-cell0-conductor-conductor" containerID="cri-o://9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" gracePeriod=30 Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.665182 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.698171 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.698150251 podStartE2EDuration="2.698150251s" podCreationTimestamp="2025-11-25 09:10:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:10:33.683262055 +0000 UTC m=+1293.809291648" watchObservedRunningTime="2025-11-25 09:10:33.698150251 +0000 UTC m=+1293.824179814" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.711650 4932 scope.go:117] "RemoveContainer" containerID="93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.715151 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.733129 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.747288 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:10:33 crc kubenswrapper[4932]: E1125 09:10:33.747753 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3637f5df-c0aa-4cf4-9885-4a0e63886fb6" containerName="cinder-api-log" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.747769 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3637f5df-c0aa-4cf4-9885-4a0e63886fb6" containerName="cinder-api-log" Nov 25 09:10:33 crc kubenswrapper[4932]: E1125 09:10:33.747795 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3637f5df-c0aa-4cf4-9885-4a0e63886fb6" containerName="cinder-api" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.747804 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3637f5df-c0aa-4cf4-9885-4a0e63886fb6" containerName="cinder-api" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.747986 4932 
memory_manager.go:354] "RemoveStaleState removing state" podUID="3637f5df-c0aa-4cf4-9885-4a0e63886fb6" containerName="cinder-api" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.748007 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="3637f5df-c0aa-4cf4-9885-4a0e63886fb6" containerName="cinder-api-log" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.748988 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.754233 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.754679 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.754729 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.754927 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.756293 4932 scope.go:117] "RemoveContainer" containerID="4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2" Nov 25 09:10:33 crc kubenswrapper[4932]: E1125 09:10:33.761442 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2\": container with ID starting with 4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2 not found: ID does not exist" containerID="4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.761495 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2"} err="failed to get container status \"4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2\": rpc error: code = NotFound desc = could not find container \"4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2\": container with ID starting with 4efe72025a95857f61df0304f29b97dfc9ad67e9c6836cd391dffca43db108a2 not found: ID does not exist" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.761536 4932 scope.go:117] "RemoveContainer" containerID="93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1" Nov 25 09:10:33 crc kubenswrapper[4932]: E1125 09:10:33.762321 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1\": container with ID starting with 93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1 not found: ID does not exist" containerID="93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.762362 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1"} err="failed to get container status \"93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1\": rpc error: code = NotFound desc = could not find container \"93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1\": container with ID starting with 
93c87318ba3aed00aa48d93e557e131ff7b33f9a482dfdc397a6d3456b5ae6e1 not found: ID does not exist" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.907916 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zgf4\" (UniqueName: \"kubernetes.io/projected/c7865402-5a21-44f9-9436-d5d1bab67a07-kube-api-access-6zgf4\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.907984 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-config-data\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.908050 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-public-tls-certs\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.908086 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7865402-5a21-44f9-9436-d5d1bab67a07-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.908128 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-scripts\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.908280 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.908370 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.908425 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-config-data-custom\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:33 crc kubenswrapper[4932]: I1125 09:10:33.908524 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7865402-5a21-44f9-9436-d5d1bab67a07-logs\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.010363 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-scripts\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.010424 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.010483 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.010501 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-config-data-custom\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.010550 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7865402-5a21-44f9-9436-d5d1bab67a07-logs\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.010574 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zgf4\" (UniqueName: \"kubernetes.io/projected/c7865402-5a21-44f9-9436-d5d1bab67a07-kube-api-access-6zgf4\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.010599 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-config-data\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.010618 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-public-tls-certs\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.010638 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7865402-5a21-44f9-9436-d5d1bab67a07-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.010713 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7865402-5a21-44f9-9436-d5d1bab67a07-etc-machine-id\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.011094 4932 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7865402-5a21-44f9-9436-d5d1bab67a07-logs\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.014758 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.015589 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-public-tls-certs\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.015632 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-scripts\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.018064 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-config-data\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.018520 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-config-data-custom\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.018894 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.033711 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zgf4\" (UniqueName: \"kubernetes.io/projected/c7865402-5a21-44f9-9436-d5d1bab67a07-kube-api-access-6zgf4\") pod \"cinder-api-0\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.074178 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.546700 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:10:34 crc kubenswrapper[4932]: W1125 09:10:34.550671 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc7865402_5a21_44f9_9436_d5d1bab67a07.slice/crio-5390171e2f1d5baf3a0c95de61ae540ccbbe324cef3d85a44562125aee7d8634 WatchSource:0}: Error finding container 5390171e2f1d5baf3a0c95de61ae540ccbbe324cef3d85a44562125aee7d8634: Status 404 returned error can't find the container with id 5390171e2f1d5baf3a0c95de61ae540ccbbe324cef3d85a44562125aee7d8634 Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.637424 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3637f5df-c0aa-4cf4-9885-4a0e63886fb6" path="/var/lib/kubelet/pods/3637f5df-c0aa-4cf4-9885-4a0e63886fb6/volumes" Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.699303 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e381bcc8-6d35-4692-b76a-a28541c6460a","Type":"ContainerStarted","Data":"4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36"} Nov 25 09:10:34 crc kubenswrapper[4932]: I1125 09:10:34.700270 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c7865402-5a21-44f9-9436-d5d1bab67a07","Type":"ContainerStarted","Data":"5390171e2f1d5baf3a0c95de61ae540ccbbe324cef3d85a44562125aee7d8634"} Nov 25 09:10:35 crc kubenswrapper[4932]: I1125 09:10:35.716836 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e381bcc8-6d35-4692-b76a-a28541c6460a","Type":"ContainerStarted","Data":"f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324"} Nov 25 09:10:35 crc kubenswrapper[4932]: I1125 09:10:35.719510 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c7865402-5a21-44f9-9436-d5d1bab67a07","Type":"ContainerStarted","Data":"c8ae9ee3aa8405ff65c9452ed08700eda42757ab1407937bb5f3003fe4cf7a9e"} Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.333276 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.391525 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-77fc89ff58-gpqc5"] Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.391785 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-77fc89ff58-gpqc5" podUID="d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" containerName="neutron-api" containerID="cri-o://a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9" gracePeriod=30 Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.392226 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-77fc89ff58-gpqc5" podUID="d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" containerName="neutron-httpd" containerID="cri-o://95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0" gracePeriod=30 Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.737973 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e381bcc8-6d35-4692-b76a-a28541c6460a","Type":"ContainerStarted","Data":"d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a"} Nov 25 09:10:36 
crc kubenswrapper[4932]: I1125 09:10:36.738467 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.738314 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="sg-core" containerID="cri-o://f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324" gracePeriod=30 Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.738338 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="ceilometer-notification-agent" containerID="cri-o://4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36" gracePeriod=30 Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.738365 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="proxy-httpd" containerID="cri-o://d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a" gracePeriod=30 Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.738073 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="ceilometer-central-agent" containerID="cri-o://b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b" gracePeriod=30 Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.740362 4932 generic.go:334] "Generic (PLEG): container finished" podID="d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" containerID="95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0" exitCode=0 Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.740430 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-77fc89ff58-gpqc5" event={"ID":"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac","Type":"ContainerDied","Data":"95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0"} Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.750008 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c7865402-5a21-44f9-9436-d5d1bab67a07","Type":"ContainerStarted","Data":"80df99d51a793387f4befd153965af902fa51eff5beea4589846bd522aef8f83"} Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.750776 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.767564 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.467463405 podStartE2EDuration="5.766185027s" podCreationTimestamp="2025-11-25 09:10:31 +0000 UTC" firstStartedPulling="2025-11-25 09:10:32.648851672 +0000 UTC m=+1292.774881235" lastFinishedPulling="2025-11-25 09:10:35.947573294 +0000 UTC m=+1296.073602857" observedRunningTime="2025-11-25 09:10:36.758517982 +0000 UTC m=+1296.884547555" watchObservedRunningTime="2025-11-25 09:10:36.766185027 +0000 UTC m=+1296.892214590" Nov 25 09:10:36 crc kubenswrapper[4932]: I1125 09:10:36.792556 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.7925368600000002 podStartE2EDuration="3.79253686s" podCreationTimestamp="2025-11-25 09:10:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:10:36.781464705 +0000 UTC m=+1296.907494268" watchObservedRunningTime="2025-11-25 09:10:36.79253686 +0000 UTC m=+1296.918566423" Nov 25 09:10:37 crc kubenswrapper[4932]: E1125 09:10:37.049480 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:10:37 crc kubenswrapper[4932]: E1125 09:10:37.055772 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:10:37 crc kubenswrapper[4932]: E1125 09:10:37.058977 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:10:37 crc kubenswrapper[4932]: E1125 09:10:37.059031 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="06f961a8-19e0-4902-9519-984f0d7bd4cd" containerName="nova-cell0-conductor-conductor" Nov 25 09:10:37 crc kubenswrapper[4932]: I1125 09:10:37.181280 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:10:37 crc kubenswrapper[4932]: I1125 09:10:37.181365 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:10:37 crc kubenswrapper[4932]: I1125 09:10:37.761011 4932 generic.go:334] "Generic (PLEG): container finished" podID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerID="d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a" exitCode=0 Nov 25 09:10:37 crc kubenswrapper[4932]: I1125 09:10:37.761361 4932 generic.go:334] "Generic (PLEG): container finished" podID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerID="f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324" exitCode=2 Nov 25 09:10:37 crc kubenswrapper[4932]: I1125 09:10:37.761373 4932 generic.go:334] "Generic (PLEG): container finished" podID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerID="4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36" exitCode=0 Nov 25 09:10:37 crc kubenswrapper[4932]: I1125 09:10:37.762338 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"e381bcc8-6d35-4692-b76a-a28541c6460a","Type":"ContainerDied","Data":"d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a"} Nov 25 09:10:37 crc kubenswrapper[4932]: I1125 09:10:37.762375 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e381bcc8-6d35-4692-b76a-a28541c6460a","Type":"ContainerDied","Data":"f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324"} Nov 25 09:10:37 crc kubenswrapper[4932]: I1125 09:10:37.762392 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e381bcc8-6d35-4692-b76a-a28541c6460a","Type":"ContainerDied","Data":"4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36"} Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.438339 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.541483 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-config-data\") pod \"e381bcc8-6d35-4692-b76a-a28541c6460a\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.541595 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e381bcc8-6d35-4692-b76a-a28541c6460a-run-httpd\") pod \"e381bcc8-6d35-4692-b76a-a28541c6460a\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.541671 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gx7ts\" (UniqueName: \"kubernetes.io/projected/e381bcc8-6d35-4692-b76a-a28541c6460a-kube-api-access-gx7ts\") pod \"e381bcc8-6d35-4692-b76a-a28541c6460a\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.541756 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-combined-ca-bundle\") pod \"e381bcc8-6d35-4692-b76a-a28541c6460a\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.541780 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-scripts\") pod \"e381bcc8-6d35-4692-b76a-a28541c6460a\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.541815 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-sg-core-conf-yaml\") pod \"e381bcc8-6d35-4692-b76a-a28541c6460a\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.541832 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e381bcc8-6d35-4692-b76a-a28541c6460a-log-httpd\") pod \"e381bcc8-6d35-4692-b76a-a28541c6460a\" (UID: \"e381bcc8-6d35-4692-b76a-a28541c6460a\") " Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.542687 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/e381bcc8-6d35-4692-b76a-a28541c6460a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e381bcc8-6d35-4692-b76a-a28541c6460a" (UID: "e381bcc8-6d35-4692-b76a-a28541c6460a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.543704 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e381bcc8-6d35-4692-b76a-a28541c6460a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e381bcc8-6d35-4692-b76a-a28541c6460a" (UID: "e381bcc8-6d35-4692-b76a-a28541c6460a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.548379 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-scripts" (OuterVolumeSpecName: "scripts") pod "e381bcc8-6d35-4692-b76a-a28541c6460a" (UID: "e381bcc8-6d35-4692-b76a-a28541c6460a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.552429 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e381bcc8-6d35-4692-b76a-a28541c6460a-kube-api-access-gx7ts" (OuterVolumeSpecName: "kube-api-access-gx7ts") pod "e381bcc8-6d35-4692-b76a-a28541c6460a" (UID: "e381bcc8-6d35-4692-b76a-a28541c6460a"). InnerVolumeSpecName "kube-api-access-gx7ts". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.571283 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e381bcc8-6d35-4692-b76a-a28541c6460a" (UID: "e381bcc8-6d35-4692-b76a-a28541c6460a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.636016 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e381bcc8-6d35-4692-b76a-a28541c6460a" (UID: "e381bcc8-6d35-4692-b76a-a28541c6460a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.644516 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e381bcc8-6d35-4692-b76a-a28541c6460a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.644553 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gx7ts\" (UniqueName: \"kubernetes.io/projected/e381bcc8-6d35-4692-b76a-a28541c6460a-kube-api-access-gx7ts\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.644571 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.644586 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.644600 4932 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.644610 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e381bcc8-6d35-4692-b76a-a28541c6460a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.647504 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-config-data" (OuterVolumeSpecName: "config-data") pod "e381bcc8-6d35-4692-b76a-a28541c6460a" (UID: "e381bcc8-6d35-4692-b76a-a28541c6460a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.746777 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e381bcc8-6d35-4692-b76a-a28541c6460a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.786493 4932 generic.go:334] "Generic (PLEG): container finished" podID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerID="b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b" exitCode=0 Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.786543 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e381bcc8-6d35-4692-b76a-a28541c6460a","Type":"ContainerDied","Data":"b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b"} Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.786575 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.786617 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e381bcc8-6d35-4692-b76a-a28541c6460a","Type":"ContainerDied","Data":"844cefb582fcaf48d19dc6063778e8cfa4defad23c3b7fc92e190e9e64ebea8f"} Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.786650 4932 scope.go:117] "RemoveContainer" containerID="d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.825247 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.829301 4932 scope.go:117] "RemoveContainer" containerID="f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.834243 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.849206 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:39 crc kubenswrapper[4932]: E1125 09:10:39.849575 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="sg-core" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.849585 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="sg-core" Nov 25 09:10:39 crc kubenswrapper[4932]: E1125 09:10:39.849601 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="ceilometer-notification-agent" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.849607 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="ceilometer-notification-agent" Nov 25 09:10:39 crc kubenswrapper[4932]: E1125 09:10:39.849622 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="ceilometer-central-agent" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.849628 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="ceilometer-central-agent" Nov 25 09:10:39 crc kubenswrapper[4932]: E1125 09:10:39.849643 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="proxy-httpd" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.849648 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="proxy-httpd" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.849825 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="sg-core" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.849838 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="ceilometer-central-agent" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.849854 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" containerName="ceilometer-notification-agent" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.849869 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" 
containerName="proxy-httpd" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.851424 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.855056 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.855237 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.862422 4932 scope.go:117] "RemoveContainer" containerID="4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.873384 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.899986 4932 scope.go:117] "RemoveContainer" containerID="b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.919069 4932 scope.go:117] "RemoveContainer" containerID="d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a" Nov 25 09:10:39 crc kubenswrapper[4932]: E1125 09:10:39.920008 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a\": container with ID starting with d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a not found: ID does not exist" containerID="d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.920042 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a"} err="failed to get container status \"d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a\": rpc error: code = NotFound desc = could not find container \"d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a\": container with ID starting with d0e630768ae946125640026338bbdb6f59ba10a05aab8f8261cd1c32440be02a not found: ID does not exist" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.920063 4932 scope.go:117] "RemoveContainer" containerID="f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324" Nov 25 09:10:39 crc kubenswrapper[4932]: E1125 09:10:39.920349 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324\": container with ID starting with f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324 not found: ID does not exist" containerID="f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.920397 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324"} err="failed to get container status \"f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324\": rpc error: code = NotFound desc = could not find container \"f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324\": container with ID starting with f7cdaa433f077ea95c9dd74b4445e57b45a2e9b27bcb534e72f19db4a12de324 not found: ID does not exist" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 
09:10:39.920435 4932 scope.go:117] "RemoveContainer" containerID="4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36" Nov 25 09:10:39 crc kubenswrapper[4932]: E1125 09:10:39.920718 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36\": container with ID starting with 4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36 not found: ID does not exist" containerID="4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.920811 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36"} err="failed to get container status \"4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36\": rpc error: code = NotFound desc = could not find container \"4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36\": container with ID starting with 4cc6f2a0c0ea6b6318a9796719848f90d39c89ed58234d31ce964a01471ced36 not found: ID does not exist" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.920849 4932 scope.go:117] "RemoveContainer" containerID="b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b" Nov 25 09:10:39 crc kubenswrapper[4932]: E1125 09:10:39.921145 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b\": container with ID starting with b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b not found: ID does not exist" containerID="b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.921171 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b"} err="failed to get container status \"b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b\": rpc error: code = NotFound desc = could not find container \"b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b\": container with ID starting with b77a620f08d56e3e84d8c02912c03462b1772f16f51d6c7334d48b8b3629357b not found: ID does not exist" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.949341 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-config-data\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.949382 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc930622-fe16-4fff-8a37-419cbafa39dd-run-httpd\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.949442 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqs7m\" (UniqueName: \"kubernetes.io/projected/fc930622-fe16-4fff-8a37-419cbafa39dd-kube-api-access-fqs7m\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 
25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.949532 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.949557 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-scripts\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.949600 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc930622-fe16-4fff-8a37-419cbafa39dd-log-httpd\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:39 crc kubenswrapper[4932]: I1125 09:10:39.949618 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.050893 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-config-data\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.050961 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc930622-fe16-4fff-8a37-419cbafa39dd-run-httpd\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.051000 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqs7m\" (UniqueName: \"kubernetes.io/projected/fc930622-fe16-4fff-8a37-419cbafa39dd-kube-api-access-fqs7m\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.051115 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.051163 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-scripts\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.051289 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc930622-fe16-4fff-8a37-419cbafa39dd-log-httpd\") pod \"ceilometer-0\" (UID: 
\"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.051972 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc930622-fe16-4fff-8a37-419cbafa39dd-log-httpd\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.051322 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.052614 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc930622-fe16-4fff-8a37-419cbafa39dd-run-httpd\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.055567 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.055678 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-config-data\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.056378 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.058294 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-scripts\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.070899 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqs7m\" (UniqueName: \"kubernetes.io/projected/fc930622-fe16-4fff-8a37-419cbafa39dd-kube-api-access-fqs7m\") pod \"ceilometer-0\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.191615 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.623477 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e381bcc8-6d35-4692-b76a-a28541c6460a" path="/var/lib/kubelet/pods/e381bcc8-6d35-4692-b76a-a28541c6460a/volumes" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.668228 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.669357 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.670125 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:10:40 crc kubenswrapper[4932]: I1125 09:10:40.795057 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc930622-fe16-4fff-8a37-419cbafa39dd","Type":"ContainerStarted","Data":"58fa221680a60e121ec7f0b3aeb5642cb424bdc33b54cfeeb2b590d32dc52905"} Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.657723 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-77fc89ff58-gpqc5" Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.782628 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-combined-ca-bundle\") pod \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.782677 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-ovndb-tls-certs\") pod \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.782755 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-config\") pod \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.782797 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-httpd-config\") pod \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.782896 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfjdv\" (UniqueName: \"kubernetes.io/projected/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-kube-api-access-pfjdv\") pod \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\" (UID: \"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac\") " Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.787653 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-kube-api-access-pfjdv" (OuterVolumeSpecName: "kube-api-access-pfjdv") pod "d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" (UID: "d3e7db17-a5ee-49d9-80d5-bc444a5d66ac"). InnerVolumeSpecName "kube-api-access-pfjdv". 
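PluginName "kubernetes.io/projected", VolumeGidValue ""

The volume entries in this stretch trace both directions of the kubelet's volume reconciler: on pod startup (ceilometer-0 above) each volume goes VerifyControllerAttachedVolume -> MountVolume started -> MountVolume.SetUp succeeded, and on teardown (the neutron pod here) UnmountVolume started -> UnmountVolume.TearDown succeeded -> "Volume detached". A rough Go sketch that scans a log like this one and collects the "started for volume" transitions per volume name; the regex is tuned to the escaped klog format seen here and keys by volume name only (a fuller version would also capture the pod UID), so treat it as illustrative:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
    )

    // Matches e.g.: operationExecutor.MountVolume started for volume \"scripts\"
    var startRe = regexp.MustCompile(
        `operationExecutor\.(\w+) started for volume \\"([^\\"]+)\\"`)

    func main() {
        ops := map[string][]string{}
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 1<<20), 1<<20) // journal lines here can be very long
        for sc.Scan() {
            for _, m := range startRe.FindAllStringSubmatch(sc.Text(), -1) {
                ops[m[2]] = append(ops[m[2]], m[1])
            }
        }
        for vol, seq := range ops {
            fmt.Printf("%s: %v\n", vol, seq)
        }
    }

Run as, for example: go run volscan.go < kubelet.log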
Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.787692 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" (UID: "d3e7db17-a5ee-49d9-80d5-bc444a5d66ac"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.809888 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc930622-fe16-4fff-8a37-419cbafa39dd","Type":"ContainerStarted","Data":"e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31"}
Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.812605 4932 generic.go:334] "Generic (PLEG): container finished" podID="d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" containerID="a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9" exitCode=0
Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.812637 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-77fc89ff58-gpqc5" event={"ID":"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac","Type":"ContainerDied","Data":"a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9"}
Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.812704 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-77fc89ff58-gpqc5" event={"ID":"d3e7db17-a5ee-49d9-80d5-bc444a5d66ac","Type":"ContainerDied","Data":"200193e5c2f03bd1ec6656fa01d90e9754a37eb5109b7401767dfb9355b7089b"}
Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.812707 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-77fc89ff58-gpqc5"
Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.812723 4932 scope.go:117] "RemoveContainer" containerID="95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0"
Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.847516 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" (UID: "d3e7db17-a5ee-49d9-80d5-bc444a5d66ac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.849535 4932 scope.go:117] "RemoveContainer" containerID="a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9"
Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.876594 4932 scope.go:117] "RemoveContainer" containerID="95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0"
Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.877071 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-config" (OuterVolumeSpecName: "config") pod "d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" (UID: "d3e7db17-a5ee-49d9-80d5-bc444a5d66ac"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:41 crc kubenswrapper[4932]: E1125 09:10:41.877105 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0\": container with ID starting with 95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0 not found: ID does not exist" containerID="95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0" Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.877137 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0"} err="failed to get container status \"95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0\": rpc error: code = NotFound desc = could not find container \"95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0\": container with ID starting with 95edf8265c19345c985b51c9bd17907d1a94fdfefc352a2e6e850c776971efd0 not found: ID does not exist" Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.877239 4932 scope.go:117] "RemoveContainer" containerID="a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9" Nov 25 09:10:41 crc kubenswrapper[4932]: E1125 09:10:41.877688 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9\": container with ID starting with a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9 not found: ID does not exist" containerID="a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9" Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.877724 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9"} err="failed to get container status \"a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9\": rpc error: code = NotFound desc = could not find container \"a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9\": container with ID starting with a90b3f2fb708dd2b8f53c3813629f59b85d3702bc43a19649e000b2dfaf504e9 not found: ID does not exist" Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.886648 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.886684 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.886698 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfjdv\" (UniqueName: \"kubernetes.io/projected/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-kube-api-access-pfjdv\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.886710 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.903132 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" (UID: "d3e7db17-a5ee-49d9-80d5-bc444a5d66ac"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:10:41 crc kubenswrapper[4932]: I1125 09:10:41.988568 4932 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:10:42 crc kubenswrapper[4932]: E1125 09:10:42.049262 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:10:42 crc kubenswrapper[4932]: E1125 09:10:42.051133 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:10:42 crc kubenswrapper[4932]: E1125 09:10:42.053529 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:10:42 crc kubenswrapper[4932]: E1125 09:10:42.053592 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="06f961a8-19e0-4902-9519-984f0d7bd4cd" containerName="nova-cell0-conductor-conductor" Nov 25 09:10:42 crc kubenswrapper[4932]: I1125 09:10:42.195900 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-77fc89ff58-gpqc5"] Nov 25 09:10:42 crc kubenswrapper[4932]: I1125 09:10:42.210407 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-77fc89ff58-gpqc5"] Nov 25 09:10:42 crc kubenswrapper[4932]: I1125 09:10:42.617330 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" path="/var/lib/kubelet/pods/d3e7db17-a5ee-49d9-80d5-bc444a5d66ac/volumes" Nov 25 09:10:42 crc kubenswrapper[4932]: I1125 09:10:42.825542 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc930622-fe16-4fff-8a37-419cbafa39dd","Type":"ContainerStarted","Data":"38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8"} Nov 25 09:10:43 crc kubenswrapper[4932]: I1125 09:10:43.836893 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc930622-fe16-4fff-8a37-419cbafa39dd","Type":"ContainerStarted","Data":"44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701"} Nov 25 09:10:44 crc kubenswrapper[4932]: I1125 09:10:44.847368 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"fc930622-fe16-4fff-8a37-419cbafa39dd","Type":"ContainerStarted","Data":"56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd"} Nov 25 09:10:44 crc kubenswrapper[4932]: I1125 09:10:44.849321 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:10:44 crc kubenswrapper[4932]: I1125 09:10:44.906858 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.707386928 podStartE2EDuration="5.906841483s" podCreationTimestamp="2025-11-25 09:10:39 +0000 UTC" firstStartedPulling="2025-11-25 09:10:40.671442418 +0000 UTC m=+1300.797471981" lastFinishedPulling="2025-11-25 09:10:43.870896983 +0000 UTC m=+1303.996926536" observedRunningTime="2025-11-25 09:10:44.901822447 +0000 UTC m=+1305.027852010" watchObservedRunningTime="2025-11-25 09:10:44.906841483 +0000 UTC m=+1305.032871046" Nov 25 09:10:45 crc kubenswrapper[4932]: I1125 09:10:45.879143 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 09:10:47 crc kubenswrapper[4932]: E1125 09:10:47.049695 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:10:47 crc kubenswrapper[4932]: E1125 09:10:47.058159 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:10:47 crc kubenswrapper[4932]: E1125 09:10:47.059619 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:10:47 crc kubenswrapper[4932]: E1125 09:10:47.059685 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="06f961a8-19e0-4902-9519-984f0d7bd4cd" containerName="nova-cell0-conductor-conductor" Nov 25 09:10:52 crc kubenswrapper[4932]: E1125 09:10:52.050962 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:10:52 crc kubenswrapper[4932]: E1125 09:10:52.053771 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:10:52 crc kubenswrapper[4932]: E1125 09:10:52.058549 4932 log.go:32] "ExecSync cmd from runtime service 
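failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]

These repeating errors are the conductor's exec readiness probe firing while its container shuts down: CRI-O refuses to register new exec sessions in a stopping container, so every probe tick from 09:10:42 until the container is finally killed at 09:11:04 fails the same way (each tick shows three ExecSync attempts, then one "Probe errored"). A sketch of the probe the log implies, reconstructed from cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]; the 5 s period matches the error cadence above, but the actual manifest may use different timings:

    package probes

    import corev1 "k8s.io/api/core/v1"

    // conductorReadiness builds the exec probe suggested by the log.
    // pgrep -r DRST succeeds only if a nova-conductor process exists in
    // run state D, R, S, or T, i.e. the service is actually running.
    func conductorReadiness() *corev1.Probe {
        return &corev1.Probe{
            ProbeHandler: corev1.ProbeHandler{
                Exec: &corev1.ExecAction{
                    Command: []string{"/usr/bin/pgrep", "-r", "DRST", "nova-conductor"},
                },
            },
            PeriodSeconds: 5, // assumed from the ~5 s spacing of the failures
        }
    }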
Nov 25 09:10:52 crc kubenswrapper[4932]: E1125 09:10:52.058664 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="06f961a8-19e0-4902-9519-984f0d7bd4cd" containerName="nova-cell0-conductor-conductor"
Nov 25 09:10:57 crc kubenswrapper[4932]: E1125 09:10:57.049200 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 25 09:10:57 crc kubenswrapper[4932]: E1125 09:10:57.051930 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 25 09:10:57 crc kubenswrapper[4932]: E1125 09:10:57.053102 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 25 09:10:57 crc kubenswrapper[4932]: E1125 09:10:57.053141 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="06f961a8-19e0-4902-9519-984f0d7bd4cd" containerName="nova-cell0-conductor-conductor"
Nov 25 09:11:02 crc kubenswrapper[4932]: E1125 09:11:02.049131 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 25 09:11:02 crc kubenswrapper[4932]: E1125 09:11:02.051240 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 25 09:11:02 crc kubenswrapper[4932]: E1125 09:11:02.052533 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 25 09:11:02 crc kubenswrapper[4932]: E1125 09:11:02.052601 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = 
command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="06f961a8-19e0-4902-9519-984f0d7bd4cd" containerName="nova-cell0-conductor-conductor" Nov 25 09:11:04 crc kubenswrapper[4932]: I1125 09:11:04.132031 4932 generic.go:334] "Generic (PLEG): container finished" podID="06f961a8-19e0-4902-9519-984f0d7bd4cd" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" exitCode=137 Nov 25 09:11:04 crc kubenswrapper[4932]: I1125 09:11:04.132110 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"06f961a8-19e0-4902-9519-984f0d7bd4cd","Type":"ContainerDied","Data":"9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff"} Nov 25 09:11:04 crc kubenswrapper[4932]: I1125 09:11:04.711982 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:04 crc kubenswrapper[4932]: I1125 09:11:04.901031 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06f961a8-19e0-4902-9519-984f0d7bd4cd-config-data\") pod \"06f961a8-19e0-4902-9519-984f0d7bd4cd\" (UID: \"06f961a8-19e0-4902-9519-984f0d7bd4cd\") " Nov 25 09:11:04 crc kubenswrapper[4932]: I1125 09:11:04.901092 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vc966\" (UniqueName: \"kubernetes.io/projected/06f961a8-19e0-4902-9519-984f0d7bd4cd-kube-api-access-vc966\") pod \"06f961a8-19e0-4902-9519-984f0d7bd4cd\" (UID: \"06f961a8-19e0-4902-9519-984f0d7bd4cd\") " Nov 25 09:11:04 crc kubenswrapper[4932]: I1125 09:11:04.901403 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f961a8-19e0-4902-9519-984f0d7bd4cd-combined-ca-bundle\") pod \"06f961a8-19e0-4902-9519-984f0d7bd4cd\" (UID: \"06f961a8-19e0-4902-9519-984f0d7bd4cd\") " Nov 25 09:11:04 crc kubenswrapper[4932]: I1125 09:11:04.912334 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06f961a8-19e0-4902-9519-984f0d7bd4cd-kube-api-access-vc966" (OuterVolumeSpecName: "kube-api-access-vc966") pod "06f961a8-19e0-4902-9519-984f0d7bd4cd" (UID: "06f961a8-19e0-4902-9519-984f0d7bd4cd"). InnerVolumeSpecName "kube-api-access-vc966". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:11:04 crc kubenswrapper[4932]: I1125 09:11:04.930932 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06f961a8-19e0-4902-9519-984f0d7bd4cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "06f961a8-19e0-4902-9519-984f0d7bd4cd" (UID: "06f961a8-19e0-4902-9519-984f0d7bd4cd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:04 crc kubenswrapper[4932]: I1125 09:11:04.937587 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06f961a8-19e0-4902-9519-984f0d7bd4cd-config-data" (OuterVolumeSpecName: "config-data") pod "06f961a8-19e0-4902-9519-984f0d7bd4cd" (UID: "06f961a8-19e0-4902-9519-984f0d7bd4cd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.003102 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06f961a8-19e0-4902-9519-984f0d7bd4cd-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.004379 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vc966\" (UniqueName: \"kubernetes.io/projected/06f961a8-19e0-4902-9519-984f0d7bd4cd-kube-api-access-vc966\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.004422 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f961a8-19e0-4902-9519-984f0d7bd4cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.148409 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"06f961a8-19e0-4902-9519-984f0d7bd4cd","Type":"ContainerDied","Data":"4a64e4864bb753982ace7144ddfc8e48f6f8490c9332c814afa38126e5999f9c"} Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.148490 4932 scope.go:117] "RemoveContainer" containerID="9ae4961ea1aaf4f600526c48abb8420fcc5d68da16e721e3bc08870edcaf53ff" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.148505 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.197706 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.214651 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.230026 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:11:05 crc kubenswrapper[4932]: E1125 09:11:05.230444 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" containerName="neutron-httpd" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.230463 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" containerName="neutron-httpd" Nov 25 09:11:05 crc kubenswrapper[4932]: E1125 09:11:05.230476 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06f961a8-19e0-4902-9519-984f0d7bd4cd" containerName="nova-cell0-conductor-conductor" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.230483 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="06f961a8-19e0-4902-9519-984f0d7bd4cd" containerName="nova-cell0-conductor-conductor" Nov 25 09:11:05 crc kubenswrapper[4932]: E1125 09:11:05.231327 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" containerName="neutron-api" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.231350 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" containerName="neutron-api" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.231557 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" containerName="neutron-api" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.231578 4932 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="d3e7db17-a5ee-49d9-80d5-bc444a5d66ac" containerName="neutron-httpd" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.231598 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="06f961a8-19e0-4902-9519-984f0d7bd4cd" containerName="nova-cell0-conductor-conductor" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.232272 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.240960 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.242891 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-tp97l" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.243027 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.314234 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.314310 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.314368 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cg6ql\" (UniqueName: \"kubernetes.io/projected/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-kube-api-access-cg6ql\") pod \"nova-cell0-conductor-0\" (UID: \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.416033 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cg6ql\" (UniqueName: \"kubernetes.io/projected/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-kube-api-access-cg6ql\") pod \"nova-cell0-conductor-0\" (UID: \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.416200 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.416246 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.422121 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.424656 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.437500 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cg6ql\" (UniqueName: \"kubernetes.io/projected/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-kube-api-access-cg6ql\") pod \"nova-cell0-conductor-0\" (UID: \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\") " pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.560933 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:05 crc kubenswrapper[4932]: I1125 09:11:05.980000 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:11:05 crc kubenswrapper[4932]: W1125 09:11:05.987626 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb57cfb59_e562_4fb2_bfad_b4cf5382c45a.slice/crio-bbfd84cd76872f9375cfcf4c9cf8aecdca57dd6170845385f12124b333c03b35 WatchSource:0}: Error finding container bbfd84cd76872f9375cfcf4c9cf8aecdca57dd6170845385f12124b333c03b35: Status 404 returned error can't find the container with id bbfd84cd76872f9375cfcf4c9cf8aecdca57dd6170845385f12124b333c03b35 Nov 25 09:11:06 crc kubenswrapper[4932]: I1125 09:11:06.159262 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"b57cfb59-e562-4fb2-bfad-b4cf5382c45a","Type":"ContainerStarted","Data":"bbfd84cd76872f9375cfcf4c9cf8aecdca57dd6170845385f12124b333c03b35"} Nov 25 09:11:06 crc kubenswrapper[4932]: I1125 09:11:06.616005 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06f961a8-19e0-4902-9519-984f0d7bd4cd" path="/var/lib/kubelet/pods/06f961a8-19e0-4902-9519-984f0d7bd4cd/volumes" Nov 25 09:11:07 crc kubenswrapper[4932]: I1125 09:11:07.171940 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"b57cfb59-e562-4fb2-bfad-b4cf5382c45a","Type":"ContainerStarted","Data":"75e6779617423b881b03c62dbe9856298c32198e80962d252b1ab4afc7067b5d"} Nov 25 09:11:07 crc kubenswrapper[4932]: I1125 09:11:07.172889 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:07 crc kubenswrapper[4932]: I1125 09:11:07.181078 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:11:07 crc kubenswrapper[4932]: I1125 09:11:07.181117 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": 
dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:11:07 crc kubenswrapper[4932]: I1125 09:11:07.191507 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.191490932 podStartE2EDuration="2.191490932s" podCreationTimestamp="2025-11-25 09:11:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:11:07.191091359 +0000 UTC m=+1327.317120922" watchObservedRunningTime="2025-11-25 09:11:07.191490932 +0000 UTC m=+1327.317520495" Nov 25 09:11:10 crc kubenswrapper[4932]: I1125 09:11:10.200585 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 09:11:13 crc kubenswrapper[4932]: I1125 09:11:13.998694 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:11:14 crc kubenswrapper[4932]: I1125 09:11:14.000246 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="5183d0c7-226f-4f06-9687-82b0c0269a5d" containerName="kube-state-metrics" containerID="cri-o://e9ffc1df49b2e958be7f09a6213f1511293f7f7b12bb6eeba3a2baf502b8d076" gracePeriod=30 Nov 25 09:11:14 crc kubenswrapper[4932]: I1125 09:11:14.236965 4932 generic.go:334] "Generic (PLEG): container finished" podID="5183d0c7-226f-4f06-9687-82b0c0269a5d" containerID="e9ffc1df49b2e958be7f09a6213f1511293f7f7b12bb6eeba3a2baf502b8d076" exitCode=2 Nov 25 09:11:14 crc kubenswrapper[4932]: I1125 09:11:14.237343 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5183d0c7-226f-4f06-9687-82b0c0269a5d","Type":"ContainerDied","Data":"e9ffc1df49b2e958be7f09a6213f1511293f7f7b12bb6eeba3a2baf502b8d076"} Nov 25 09:11:14 crc kubenswrapper[4932]: I1125 09:11:14.533509 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:11:14 crc kubenswrapper[4932]: I1125 09:11:14.704984 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqz6s\" (UniqueName: \"kubernetes.io/projected/5183d0c7-226f-4f06-9687-82b0c0269a5d-kube-api-access-zqz6s\") pod \"5183d0c7-226f-4f06-9687-82b0c0269a5d\" (UID: \"5183d0c7-226f-4f06-9687-82b0c0269a5d\") " Nov 25 09:11:14 crc kubenswrapper[4932]: I1125 09:11:14.732605 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5183d0c7-226f-4f06-9687-82b0c0269a5d-kube-api-access-zqz6s" (OuterVolumeSpecName: "kube-api-access-zqz6s") pod "5183d0c7-226f-4f06-9687-82b0c0269a5d" (UID: "5183d0c7-226f-4f06-9687-82b0c0269a5d"). InnerVolumeSpecName "kube-api-access-zqz6s". 
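PluginName "kubernetes.io/projected", VolumeGidValue ""

The machine-config-daemon liveness failure a few entries up is an HTTP probe: the kubelet issues GET http://127.0.0.1:8798/health and treats any 2xx/3xx status as success, so "connect: connection refused" simply means nothing was listening on the port at that instant. A freestanding Go approximation of that check (not the kubelet's prober package):

    package healthcheck

    import (
        "fmt"
        "net/http"
        "time"
    )

    // checkHealth mimics an HTTP liveness probe: any response with a
    // status in [200, 400) counts as healthy; connection errors (like the
    // "connection refused" in the log) count as probe failures.
    func checkHealth(url string) error {
        client := &http.Client{Timeout: time.Second}
        resp, err := client.Get(url)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("unhealthy: %s", resp.Status)
        }
        return nil
    }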
Nov 25 09:11:14 crc kubenswrapper[4932]: I1125 09:11:14.808813 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqz6s\" (UniqueName: \"kubernetes.io/projected/5183d0c7-226f-4f06-9687-82b0c0269a5d-kube-api-access-zqz6s\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.248448 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5183d0c7-226f-4f06-9687-82b0c0269a5d","Type":"ContainerDied","Data":"a98689e4c03aa93ce7b0b315d7b88b982013d69640532d6faa686912be1e18b4"}
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.248516 4932 scope.go:117] "RemoveContainer" containerID="e9ffc1df49b2e958be7f09a6213f1511293f7f7b12bb6eeba3a2baf502b8d076"
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.248567 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.292363 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.299885 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.316014 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 09:11:15 crc kubenswrapper[4932]: E1125 09:11:15.316456 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5183d0c7-226f-4f06-9687-82b0c0269a5d" containerName="kube-state-metrics"
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.316480 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5183d0c7-226f-4f06-9687-82b0c0269a5d" containerName="kube-state-metrics"
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.316693 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="5183d0c7-226f-4f06-9687-82b0c0269a5d" containerName="kube-state-metrics"
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.317398 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.322717 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.322970 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.329452 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.518893 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " pod="openstack/kube-state-metrics-0" Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.519006 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " pod="openstack/kube-state-metrics-0" Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.519119 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " pod="openstack/kube-state-metrics-0" Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.519178 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s57q6\" (UniqueName: \"kubernetes.io/projected/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-api-access-s57q6\") pod \"kube-state-metrics-0\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " pod="openstack/kube-state-metrics-0" Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.596847 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.601626 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.601928 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="ceilometer-central-agent" containerID="cri-o://e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31" gracePeriod=30 Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.602025 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="ceilometer-notification-agent" containerID="cri-o://38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8" gracePeriod=30 Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.602045 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="proxy-httpd" 
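containerID="cri-o://56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd" gracePeriod=30

Each "Killing container with a grace period" entry here carries gracePeriod=30, the pod's terminationGracePeriodSeconds (30 s is the Kubernetes default): the runtime sends SIGTERM, and anything still alive after the window is SIGKILLed, which surfaces as exitCode=137 (128+9), exactly what the nova-cell0-conductor container showed at 09:11:04 after its probes spent the whole window failing. Callers can also override the grace period per delete request; an illustrative client-go sketch (deleteWithGrace is a hypothetical helper, not from this log):

    package graceful

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // deleteWithGrace deletes a pod with an explicit grace period,
    // overriding the pod's terminationGracePeriodSeconds for this request.
    func deleteWithGrace(cs kubernetes.Interface, ns, name string, seconds int64) error {
        return cs.CoreV1().Pods(ns).Delete(context.TODO(), name,
            metav1.DeleteOptions{GracePeriodSeconds: &seconds})
    }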
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.604306 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="sg-core" containerID="cri-o://44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701" gracePeriod=30
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.625281 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " pod="openstack/kube-state-metrics-0"
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.625339 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " pod="openstack/kube-state-metrics-0"
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.625399 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " pod="openstack/kube-state-metrics-0"
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.625428 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s57q6\" (UniqueName: \"kubernetes.io/projected/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-api-access-s57q6\") pod \"kube-state-metrics-0\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " pod="openstack/kube-state-metrics-0"
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.631612 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " pod="openstack/kube-state-metrics-0"
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.632916 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " pod="openstack/kube-state-metrics-0"
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.636345 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " pod="openstack/kube-state-metrics-0"
Nov 25 09:11:15 crc kubenswrapper[4932]: I1125 09:11:15.649829 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s57q6\" (UniqueName: \"kubernetes.io/projected/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-api-access-s57q6\") pod \"kube-state-metrics-0\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " pod="openstack/kube-state-metrics-0"
Nov 25 09:11:15 crc 
kubenswrapper[4932]: I1125 09:11:15.942087 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.266141 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerID="56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd" exitCode=0 Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.268132 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerID="44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701" exitCode=2 Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.268150 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerID="e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31" exitCode=0 Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.268253 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc930622-fe16-4fff-8a37-419cbafa39dd","Type":"ContainerDied","Data":"56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd"} Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.268320 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc930622-fe16-4fff-8a37-419cbafa39dd","Type":"ContainerDied","Data":"44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701"} Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.268335 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc930622-fe16-4fff-8a37-419cbafa39dd","Type":"ContainerDied","Data":"e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31"} Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.369168 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-dnps4"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.370580 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.378776 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.380776 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.393695 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-dnps4"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.432051 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.448335 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-config-data\") pod \"nova-cell0-cell-mapping-dnps4\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.448369 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcpl8\" (UniqueName: \"kubernetes.io/projected/a24ef146-76b6-4034-afce-fa2e2c94e641-kube-api-access-zcpl8\") pod \"nova-cell0-cell-mapping-dnps4\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.448545 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-dnps4\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.448590 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-scripts\") pod \"nova-cell0-cell-mapping-dnps4\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.460665 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.549698 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.554105 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-config-data\") pod \"nova-cell0-cell-mapping-dnps4\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.554158 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcpl8\" (UniqueName: \"kubernetes.io/projected/a24ef146-76b6-4034-afce-fa2e2c94e641-kube-api-access-zcpl8\") pod \"nova-cell0-cell-mapping-dnps4\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.554412 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-dnps4\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.554449 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-scripts\") pod \"nova-cell0-cell-mapping-dnps4\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.555520 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.563701 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.565236 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.566506 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-scripts\") pod \"nova-cell0-cell-mapping-dnps4\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.570861 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-config-data\") pod \"nova-cell0-cell-mapping-dnps4\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.572776 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-dnps4\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.586971 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.589789 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.594725 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcpl8\" (UniqueName: \"kubernetes.io/projected/a24ef146-76b6-4034-afce-fa2e2c94e641-kube-api-access-zcpl8\") pod \"nova-cell0-cell-mapping-dnps4\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.613934 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.638325 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5183d0c7-226f-4f06-9687-82b0c0269a5d" path="/var/lib/kubelet/pods/5183d0c7-226f-4f06-9687-82b0c0269a5d/volumes" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.656472 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5f70393-a9e1-4c5b-8729-81051eb638b0-logs\") pod \"nova-api-0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") " pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.656532 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f4nq\" (UniqueName: \"kubernetes.io/projected/b5f70393-a9e1-4c5b-8729-81051eb638b0-kube-api-access-4f4nq\") pod \"nova-api-0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") " pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.656662 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5f70393-a9e1-4c5b-8729-81051eb638b0-config-data\") pod \"nova-api-0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") " pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.656698 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5f70393-a9e1-4c5b-8729-81051eb638b0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") " pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.696958 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.697543 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.716414 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.718120 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.732437 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.756862 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.757860 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqg66\" (UniqueName: \"kubernetes.io/projected/8af5979c-58ee-4098-9e8d-06cf64551372-kube-api-access-nqg66\") pod \"nova-metadata-0\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.757900 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5f70393-a9e1-4c5b-8729-81051eb638b0-config-data\") pod \"nova-api-0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") " pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.757929 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5f70393-a9e1-4c5b-8729-81051eb638b0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") " pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.757985 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8af5979c-58ee-4098-9e8d-06cf64551372-logs\") pod \"nova-metadata-0\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.758014 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8af5979c-58ee-4098-9e8d-06cf64551372-config-data\") pod \"nova-metadata-0\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.758031 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8af5979c-58ee-4098-9e8d-06cf64551372-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.758054 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5f70393-a9e1-4c5b-8729-81051eb638b0-logs\") pod \"nova-api-0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") " pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.758076 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f4nq\" (UniqueName: \"kubernetes.io/projected/b5f70393-a9e1-4c5b-8729-81051eb638b0-kube-api-access-4f4nq\") pod \"nova-api-0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") " pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.765296 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5f70393-a9e1-4c5b-8729-81051eb638b0-logs\") pod 
\"nova-api-0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") " pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.767664 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5f70393-a9e1-4c5b-8729-81051eb638b0-config-data\") pod \"nova-api-0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") " pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.770019 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5f70393-a9e1-4c5b-8729-81051eb638b0-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") " pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.787734 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f4nq\" (UniqueName: \"kubernetes.io/projected/b5f70393-a9e1-4c5b-8729-81051eb638b0-kube-api-access-4f4nq\") pod \"nova-api-0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") " pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.798615 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.800365 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.802222 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.820752 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-bw9mt"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.829079 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.843890 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.859357 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8af5979c-58ee-4098-9e8d-06cf64551372-logs\") pod \"nova-metadata-0\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.859612 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8af5979c-58ee-4098-9e8d-06cf64551372-config-data\") pod \"nova-metadata-0\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.859702 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8af5979c-58ee-4098-9e8d-06cf64551372-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.859852 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b071159f-2eb5-44b8-90da-1b9605430aea-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b071159f-2eb5-44b8-90da-1b9605430aea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.860598 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqx6c\" (UniqueName: \"kubernetes.io/projected/b071159f-2eb5-44b8-90da-1b9605430aea-kube-api-access-rqx6c\") pod \"nova-cell1-novncproxy-0\" (UID: \"b071159f-2eb5-44b8-90da-1b9605430aea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.860743 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqg66\" (UniqueName: \"kubernetes.io/projected/8af5979c-58ee-4098-9e8d-06cf64551372-kube-api-access-nqg66\") pod \"nova-metadata-0\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.860973 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b071159f-2eb5-44b8-90da-1b9605430aea-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b071159f-2eb5-44b8-90da-1b9605430aea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.861753 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-bw9mt"] Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.862171 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8af5979c-58ee-4098-9e8d-06cf64551372-logs\") pod \"nova-metadata-0\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.868714 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/8af5979c-58ee-4098-9e8d-06cf64551372-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.872533 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8af5979c-58ee-4098-9e8d-06cf64551372-config-data\") pod \"nova-metadata-0\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.890045 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqg66\" (UniqueName: \"kubernetes.io/projected/8af5979c-58ee-4098-9e8d-06cf64551372-kube-api-access-nqg66\") pod \"nova-metadata-0\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.920791 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.949285 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.970130 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-config\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.970244 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-ovsdbserver-nb\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.970268 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqm2z\" (UniqueName: \"kubernetes.io/projected/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-kube-api-access-pqm2z\") pod \"nova-scheduler-0\" (UID: \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\") " pod="openstack/nova-scheduler-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.970285 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-dns-swift-storage-0\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.970307 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5b7s\" (UniqueName: \"kubernetes.io/projected/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-kube-api-access-v5b7s\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.970323 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\") " pod="openstack/nova-scheduler-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.970352 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b071159f-2eb5-44b8-90da-1b9605430aea-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b071159f-2eb5-44b8-90da-1b9605430aea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.970431 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b071159f-2eb5-44b8-90da-1b9605430aea-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b071159f-2eb5-44b8-90da-1b9605430aea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.970469 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-dns-svc\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.970506 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-ovsdbserver-sb\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.970527 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqx6c\" (UniqueName: \"kubernetes.io/projected/b071159f-2eb5-44b8-90da-1b9605430aea-kube-api-access-rqx6c\") pod \"nova-cell1-novncproxy-0\" (UID: \"b071159f-2eb5-44b8-90da-1b9605430aea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.970571 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-config-data\") pod \"nova-scheduler-0\" (UID: \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\") " pod="openstack/nova-scheduler-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.986743 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b071159f-2eb5-44b8-90da-1b9605430aea-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b071159f-2eb5-44b8-90da-1b9605430aea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:16 crc kubenswrapper[4932]: I1125 09:11:16.986936 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b071159f-2eb5-44b8-90da-1b9605430aea-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b071159f-2eb5-44b8-90da-1b9605430aea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.014504 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqx6c\" (UniqueName: \"kubernetes.io/projected/b071159f-2eb5-44b8-90da-1b9605430aea-kube-api-access-rqx6c\") pod \"nova-cell1-novncproxy-0\" (UID: 
\"b071159f-2eb5-44b8-90da-1b9605430aea\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.079647 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-dns-svc\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.080601 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-dns-svc\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.080752 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-ovsdbserver-sb\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.080873 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-config-data\") pod \"nova-scheduler-0\" (UID: \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\") " pod="openstack/nova-scheduler-0" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.081274 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-config\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.081377 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-ovsdbserver-nb\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.081411 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqm2z\" (UniqueName: \"kubernetes.io/projected/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-kube-api-access-pqm2z\") pod \"nova-scheduler-0\" (UID: \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\") " pod="openstack/nova-scheduler-0" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.081443 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-dns-swift-storage-0\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.081482 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5b7s\" (UniqueName: \"kubernetes.io/projected/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-kube-api-access-v5b7s\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: 
I1125 09:11:17.081507 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\") " pod="openstack/nova-scheduler-0" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.082449 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-config\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.082554 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-ovsdbserver-nb\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.082950 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-dns-swift-storage-0\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.083038 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-ovsdbserver-sb\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.084827 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-config-data\") pod \"nova-scheduler-0\" (UID: \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\") " pod="openstack/nova-scheduler-0" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.090808 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\") " pod="openstack/nova-scheduler-0" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.100090 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqm2z\" (UniqueName: \"kubernetes.io/projected/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-kube-api-access-pqm2z\") pod \"nova-scheduler-0\" (UID: \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\") " pod="openstack/nova-scheduler-0" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.104177 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5b7s\" (UniqueName: \"kubernetes.io/projected/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-kube-api-access-v5b7s\") pod \"dnsmasq-dns-64dbf5859c-bw9mt\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.290665 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-dnps4"] Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.291776 
4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad","Type":"ContainerStarted","Data":"41c91b80b3392c4827df6a0af10ff1bbc06b12abdc9d85527024976e4de2ee8c"} Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.293201 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:17 crc kubenswrapper[4932]: W1125 09:11:17.295066 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda24ef146_76b6_4034_afce_fa2e2c94e641.slice/crio-a8dc641b731782387afcc59b08aa43ec34b1ed7653e08f7330974fad72d5ae2f WatchSource:0}: Error finding container a8dc641b731782387afcc59b08aa43ec34b1ed7653e08f7330974fad72d5ae2f: Status 404 returned error can't find the container with id a8dc641b731782387afcc59b08aa43ec34b1ed7653e08f7330974fad72d5ae2f Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.314136 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.320456 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.393357 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-ln6w4"] Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.395068 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.397478 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.402401 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-ln6w4"] Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.402884 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.497180 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2n6f\" (UniqueName: \"kubernetes.io/projected/92f23c40-bf12-4901-8f08-5d306bab0cef-kube-api-access-t2n6f\") pod \"nova-cell1-conductor-db-sync-ln6w4\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") " pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.497719 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-scripts\") pod \"nova-cell1-conductor-db-sync-ln6w4\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") " pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.497813 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-config-data\") pod \"nova-cell1-conductor-db-sync-ln6w4\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") " pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.497886 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-ln6w4\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") " pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.599157 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-ln6w4\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") " pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.599299 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2n6f\" (UniqueName: \"kubernetes.io/projected/92f23c40-bf12-4901-8f08-5d306bab0cef-kube-api-access-t2n6f\") pod \"nova-cell1-conductor-db-sync-ln6w4\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") " pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.599396 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-scripts\") pod \"nova-cell1-conductor-db-sync-ln6w4\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") " pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.599453 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-config-data\") pod \"nova-cell1-conductor-db-sync-ln6w4\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") " pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.604539 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-scripts\") pod \"nova-cell1-conductor-db-sync-ln6w4\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") " pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.605087 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-config-data\") pod \"nova-cell1-conductor-db-sync-ln6w4\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") " pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.605370 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-ln6w4\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") " pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.615086 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2n6f\" (UniqueName: \"kubernetes.io/projected/92f23c40-bf12-4901-8f08-5d306bab0cef-kube-api-access-t2n6f\") pod \"nova-cell1-conductor-db-sync-ln6w4\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") " pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: W1125 
09:11:17.615953 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5f70393_a9e1_4c5b_8729_81051eb638b0.slice/crio-43c4b6ebe40ccf68a9e372a3c33793d6a9d8bae4f1164473836c3f226fa77595 WatchSource:0}: Error finding container 43c4b6ebe40ccf68a9e372a3c33793d6a9d8bae4f1164473836c3f226fa77595: Status 404 returned error can't find the container with id 43c4b6ebe40ccf68a9e372a3c33793d6a9d8bae4f1164473836c3f226fa77595 Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.680487 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.689776 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.793165 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-ln6w4" Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.902942 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.912143 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:11:17 crc kubenswrapper[4932]: W1125 09:11:17.921457 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb071159f_2eb5_44b8_90da_1b9605430aea.slice/crio-0a165f3e5d46d4e3c385987800722a422752ead8836532d64a4186c1c607d2fe WatchSource:0}: Error finding container 0a165f3e5d46d4e3c385987800722a422752ead8836532d64a4186c1c607d2fe: Status 404 returned error can't find the container with id 0a165f3e5d46d4e3c385987800722a422752ead8836532d64a4186c1c607d2fe Nov 25 09:11:17 crc kubenswrapper[4932]: I1125 09:11:17.921849 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-bw9mt"] Nov 25 09:11:17 crc kubenswrapper[4932]: W1125 09:11:17.923039 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f0f9e33_5d64_4861_ae33_c6e01ca8af78.slice/crio-ded55ae0e220180211aba9b0c0c19c1ae4bbe2b11f730949917a0e2cd04fd273 WatchSource:0}: Error finding container ded55ae0e220180211aba9b0c0c19c1ae4bbe2b11f730949917a0e2cd04fd273: Status 404 returned error can't find the container with id ded55ae0e220180211aba9b0c0c19c1ae4bbe2b11f730949917a0e2cd04fd273 Nov 25 09:11:17 crc kubenswrapper[4932]: W1125 09:11:17.923469 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda378a29_86a1_4b28_ba5e_37ec4f3e3fc8.slice/crio-118a8255d787fb7000280b5fd9f6a092c9aac11b18f00f35e1946b7c163836ac WatchSource:0}: Error finding container 118a8255d787fb7000280b5fd9f6a092c9aac11b18f00f35e1946b7c163836ac: Status 404 returned error can't find the container with id 118a8255d787fb7000280b5fd9f6a092c9aac11b18f00f35e1946b7c163836ac Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.301382 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6f0f9e33-5d64-4861-ae33-c6e01ca8af78","Type":"ContainerStarted","Data":"ded55ae0e220180211aba9b0c0c19c1ae4bbe2b11f730949917a0e2cd04fd273"} Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.303165 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad","Type":"ContainerStarted","Data":"c99b5f0370ed3831068a8fcc89de815c298aa1e1d8bcee1c409429deb6c5c99a"} Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.304257 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.306673 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b071159f-2eb5-44b8-90da-1b9605430aea","Type":"ContainerStarted","Data":"0a165f3e5d46d4e3c385987800722a422752ead8836532d64a4186c1c607d2fe"} Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.310501 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" event={"ID":"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8","Type":"ContainerStarted","Data":"f4a7675ac305600c60ae126df1a98bd624ca63d6310584a2191702cc5a4f0878"} Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.310538 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" event={"ID":"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8","Type":"ContainerStarted","Data":"118a8255d787fb7000280b5fd9f6a092c9aac11b18f00f35e1946b7c163836ac"} Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.311865 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b5f70393-a9e1-4c5b-8729-81051eb638b0","Type":"ContainerStarted","Data":"43c4b6ebe40ccf68a9e372a3c33793d6a9d8bae4f1164473836c3f226fa77595"} Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.313526 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8af5979c-58ee-4098-9e8d-06cf64551372","Type":"ContainerStarted","Data":"42cc2fa00ddc062fc7b01ae162405a4a1ccdbaeb76aceba8b4d0a5fc00d6ac34"} Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.314802 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dnps4" event={"ID":"a24ef146-76b6-4034-afce-fa2e2c94e641","Type":"ContainerStarted","Data":"756e2877e3a1a5bb86487a14267262b52d648ff3b7723eaf2254117cbadf4bb4"} Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.314835 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dnps4" event={"ID":"a24ef146-76b6-4034-afce-fa2e2c94e641","Type":"ContainerStarted","Data":"a8dc641b731782387afcc59b08aa43ec34b1ed7653e08f7330974fad72d5ae2f"} Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.334106 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.553485728 podStartE2EDuration="3.334088533s" podCreationTimestamp="2025-11-25 09:11:15 +0000 UTC" firstStartedPulling="2025-11-25 09:11:16.460413104 +0000 UTC m=+1336.586442677" lastFinishedPulling="2025-11-25 09:11:17.241015919 +0000 UTC m=+1337.367045482" observedRunningTime="2025-11-25 09:11:18.32723271 +0000 UTC m=+1338.453262273" watchObservedRunningTime="2025-11-25 09:11:18.334088533 +0000 UTC m=+1338.460118096" Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.346322 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-dnps4" podStartSLOduration=2.346303422 podStartE2EDuration="2.346303422s" podCreationTimestamp="2025-11-25 09:11:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 
09:11:18.344554378 +0000 UTC m=+1338.470583941" watchObservedRunningTime="2025-11-25 09:11:18.346303422 +0000 UTC m=+1338.472332985" Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.373368 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-ln6w4"] Nov 25 09:11:18 crc kubenswrapper[4932]: W1125 09:11:18.379650 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod92f23c40_bf12_4901_8f08_5d306bab0cef.slice/crio-46d965acc860bbaa95da783f3474538a6b5000da2dc9e824828e6aefd4b4407e WatchSource:0}: Error finding container 46d965acc860bbaa95da783f3474538a6b5000da2dc9e824828e6aefd4b4407e: Status 404 returned error can't find the container with id 46d965acc860bbaa95da783f3474538a6b5000da2dc9e824828e6aefd4b4407e Nov 25 09:11:18 crc kubenswrapper[4932]: I1125 09:11:18.972461 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.138152 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-scripts\") pod \"fc930622-fe16-4fff-8a37-419cbafa39dd\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.139277 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-combined-ca-bundle\") pod \"fc930622-fe16-4fff-8a37-419cbafa39dd\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.139402 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc930622-fe16-4fff-8a37-419cbafa39dd-log-httpd\") pod \"fc930622-fe16-4fff-8a37-419cbafa39dd\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.139493 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-config-data\") pod \"fc930622-fe16-4fff-8a37-419cbafa39dd\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.139535 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc930622-fe16-4fff-8a37-419cbafa39dd-run-httpd\") pod \"fc930622-fe16-4fff-8a37-419cbafa39dd\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.139573 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-sg-core-conf-yaml\") pod \"fc930622-fe16-4fff-8a37-419cbafa39dd\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.139603 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqs7m\" (UniqueName: \"kubernetes.io/projected/fc930622-fe16-4fff-8a37-419cbafa39dd-kube-api-access-fqs7m\") pod \"fc930622-fe16-4fff-8a37-419cbafa39dd\" (UID: \"fc930622-fe16-4fff-8a37-419cbafa39dd\") " Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.140992 4932 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc930622-fe16-4fff-8a37-419cbafa39dd-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "fc930622-fe16-4fff-8a37-419cbafa39dd" (UID: "fc930622-fe16-4fff-8a37-419cbafa39dd"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.141507 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc930622-fe16-4fff-8a37-419cbafa39dd-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "fc930622-fe16-4fff-8a37-419cbafa39dd" (UID: "fc930622-fe16-4fff-8a37-419cbafa39dd"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.145227 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc930622-fe16-4fff-8a37-419cbafa39dd-kube-api-access-fqs7m" (OuterVolumeSpecName: "kube-api-access-fqs7m") pod "fc930622-fe16-4fff-8a37-419cbafa39dd" (UID: "fc930622-fe16-4fff-8a37-419cbafa39dd"). InnerVolumeSpecName "kube-api-access-fqs7m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.148242 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-scripts" (OuterVolumeSpecName: "scripts") pod "fc930622-fe16-4fff-8a37-419cbafa39dd" (UID: "fc930622-fe16-4fff-8a37-419cbafa39dd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.195841 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "fc930622-fe16-4fff-8a37-419cbafa39dd" (UID: "fc930622-fe16-4fff-8a37-419cbafa39dd"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.243781 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqs7m\" (UniqueName: \"kubernetes.io/projected/fc930622-fe16-4fff-8a37-419cbafa39dd-kube-api-access-fqs7m\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.245751 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.245767 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc930622-fe16-4fff-8a37-419cbafa39dd-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.245776 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fc930622-fe16-4fff-8a37-419cbafa39dd-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.245785 4932 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.265365 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-config-data" (OuterVolumeSpecName: "config-data") pod "fc930622-fe16-4fff-8a37-419cbafa39dd" (UID: "fc930622-fe16-4fff-8a37-419cbafa39dd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.322287 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fc930622-fe16-4fff-8a37-419cbafa39dd" (UID: "fc930622-fe16-4fff-8a37-419cbafa39dd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.335601 4932 generic.go:334] "Generic (PLEG): container finished" podID="da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" containerID="f4a7675ac305600c60ae126df1a98bd624ca63d6310584a2191702cc5a4f0878" exitCode=0 Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.335629 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" event={"ID":"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8","Type":"ContainerDied","Data":"f4a7675ac305600c60ae126df1a98bd624ca63d6310584a2191702cc5a4f0878"} Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.339566 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerID="38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8" exitCode=0 Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.339644 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc930622-fe16-4fff-8a37-419cbafa39dd","Type":"ContainerDied","Data":"38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8"} Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.339669 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fc930622-fe16-4fff-8a37-419cbafa39dd","Type":"ContainerDied","Data":"58fa221680a60e121ec7f0b3aeb5642cb424bdc33b54cfeeb2b590d32dc52905"} Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.339686 4932 scope.go:117] "RemoveContainer" containerID="56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.339807 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.347242 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.347336 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fc930622-fe16-4fff-8a37-419cbafa39dd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.347621 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-ln6w4" event={"ID":"92f23c40-bf12-4901-8f08-5d306bab0cef","Type":"ContainerStarted","Data":"3445d785af0441b8b567d57c03e6835b2bfed12872a3896dad8940d2a438528d"} Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.347662 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-ln6w4" event={"ID":"92f23c40-bf12-4901-8f08-5d306bab0cef","Type":"ContainerStarted","Data":"46d965acc860bbaa95da783f3474538a6b5000da2dc9e824828e6aefd4b4407e"} Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.400632 4932 scope.go:117] "RemoveContainer" containerID="44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.406214 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-ln6w4" podStartSLOduration=2.406179376 podStartE2EDuration="2.406179376s" podCreationTimestamp="2025-11-25 09:11:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:11:19.391729027 +0000 UTC m=+1339.517758600" watchObservedRunningTime="2025-11-25 09:11:19.406179376 +0000 UTC m=+1339.532208939" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.438791 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.449233 4932 scope.go:117] "RemoveContainer" containerID="38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.476236 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.487661 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:11:19 crc kubenswrapper[4932]: E1125 09:11:19.488130 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="ceilometer-notification-agent" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.488146 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="ceilometer-notification-agent" Nov 25 09:11:19 crc kubenswrapper[4932]: E1125 09:11:19.488204 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="proxy-httpd" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.488215 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="proxy-httpd" Nov 25 09:11:19 crc kubenswrapper[4932]: E1125 09:11:19.488237 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="sg-core" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.488245 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="sg-core" Nov 25 09:11:19 crc kubenswrapper[4932]: E1125 09:11:19.488257 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="ceilometer-central-agent" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.488265 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="ceilometer-central-agent" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.488511 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="ceilometer-central-agent" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.488535 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="ceilometer-notification-agent" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.488549 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="sg-core" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.488574 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" containerName="proxy-httpd" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.490274 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.493724 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.493941 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.494094 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.501008 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.533918 4932 scope.go:117] "RemoveContainer" containerID="e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.588580 4932 scope.go:117] "RemoveContainer" containerID="56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd" Nov 25 09:11:19 crc kubenswrapper[4932]: E1125 09:11:19.589102 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd\": container with ID starting with 56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd not found: ID does not exist" containerID="56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.589145 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd"} err="failed to get container status \"56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd\": rpc error: code = NotFound desc = could not find container \"56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd\": container with ID starting with 56137797db92852d208d79942380ceb295f484a1d63c3ce999fb09dd4c342edd not found: ID does not exist" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.589171 4932 scope.go:117] "RemoveContainer" containerID="44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701" Nov 25 09:11:19 crc kubenswrapper[4932]: E1125 09:11:19.589637 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701\": container with ID starting with 44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701 not found: ID does not exist" containerID="44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.589669 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701"} err="failed to get container status \"44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701\": rpc error: code = NotFound desc = could not find container \"44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701\": container with ID starting with 44e92a4356a31fd6bf12fded97b8792020394a76806cf50b6683146e1c70a701 not found: ID does not exist" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.589689 4932 scope.go:117] "RemoveContainer" containerID="38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8" Nov 25 09:11:19 
crc kubenswrapper[4932]: E1125 09:11:19.589885 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8\": container with ID starting with 38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8 not found: ID does not exist" containerID="38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.589908 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8"} err="failed to get container status \"38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8\": rpc error: code = NotFound desc = could not find container \"38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8\": container with ID starting with 38a5febbab2d330b310138654193f6ead08879f09e6e0a757298e78546a62cf8 not found: ID does not exist" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.589921 4932 scope.go:117] "RemoveContainer" containerID="e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31" Nov 25 09:11:19 crc kubenswrapper[4932]: E1125 09:11:19.590229 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31\": container with ID starting with e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31 not found: ID does not exist" containerID="e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.590298 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31"} err="failed to get container status \"e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31\": rpc error: code = NotFound desc = could not find container \"e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31\": container with ID starting with e4470692ac52f2cc8b00590f0b0ae4adfefe17f245134fa71638a039aaae3e31 not found: ID does not exist" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.654458 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.654542 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e38a2272-cb35-490e-8ea3-672050e88c8a-run-httpd\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.654565 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.654585 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-zvx8l\" (UniqueName: \"kubernetes.io/projected/e38a2272-cb35-490e-8ea3-672050e88c8a-kube-api-access-zvx8l\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.654637 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-config-data\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.654684 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e38a2272-cb35-490e-8ea3-672050e88c8a-log-httpd\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.654704 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-scripts\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.654731 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.755888 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e38a2272-cb35-490e-8ea3-672050e88c8a-log-httpd\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.756337 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-scripts\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.756981 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.757053 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.757081 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e38a2272-cb35-490e-8ea3-672050e88c8a-log-httpd\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.757469 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e38a2272-cb35-490e-8ea3-672050e88c8a-run-httpd\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.757530 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.757576 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvx8l\" (UniqueName: \"kubernetes.io/projected/e38a2272-cb35-490e-8ea3-672050e88c8a-kube-api-access-zvx8l\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.757716 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-config-data\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.758095 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e38a2272-cb35-490e-8ea3-672050e88c8a-run-httpd\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.763844 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.765125 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.765306 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.774991 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-scripts\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.777165 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvx8l\" (UniqueName: \"kubernetes.io/projected/e38a2272-cb35-490e-8ea3-672050e88c8a-kube-api-access-zvx8l\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.794919 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-config-data\") pod \"ceilometer-0\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " pod="openstack/ceilometer-0" Nov 25 09:11:19 crc kubenswrapper[4932]: I1125 09:11:19.825265 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:11:20 crc kubenswrapper[4932]: I1125 09:11:20.002531 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:11:20 crc kubenswrapper[4932]: I1125 09:11:20.019525 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:11:20 crc kubenswrapper[4932]: I1125 09:11:20.357971 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" event={"ID":"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8","Type":"ContainerStarted","Data":"71fd789f350620b59d2210426d201b277c0e3bcf014c5657123fd95a5d0459d1"} Nov 25 09:11:20 crc kubenswrapper[4932]: I1125 09:11:20.358395 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:20 crc kubenswrapper[4932]: I1125 09:11:20.390272 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" podStartSLOduration=4.390254336 podStartE2EDuration="4.390254336s" podCreationTimestamp="2025-11-25 09:11:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:11:20.380914436 +0000 UTC m=+1340.506944009" watchObservedRunningTime="2025-11-25 09:11:20.390254336 +0000 UTC m=+1340.516283899" Nov 25 09:11:20 crc kubenswrapper[4932]: I1125 09:11:20.433801 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:11:20 crc kubenswrapper[4932]: I1125 09:11:20.626927 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc930622-fe16-4fff-8a37-419cbafa39dd" path="/var/lib/kubelet/pods/fc930622-fe16-4fff-8a37-419cbafa39dd/volumes" Nov 25 09:11:23 crc kubenswrapper[4932]: I1125 09:11:23.389042 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e38a2272-cb35-490e-8ea3-672050e88c8a","Type":"ContainerStarted","Data":"f44482c0d2922b77fefb1aab38290fb92fb7af2f579f34ffa6deba43de5e2e8c"} Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.398412 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b071159f-2eb5-44b8-90da-1b9605430aea","Type":"ContainerStarted","Data":"22766927fe02c87ef13b604aa23003e23b4c65e52b9aed4158cb19b4dd1cc14c"} Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.402813 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b5f70393-a9e1-4c5b-8729-81051eb638b0","Type":"ContainerStarted","Data":"94e26657b4459559c5225a5cdc43af92ec60a964e5e3ba7b4573d1374940e49a"} Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.402830 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b5f70393-a9e1-4c5b-8729-81051eb638b0","Type":"ContainerStarted","Data":"747180a5ae1996b3690d4c5f6d543d60fd5f0c5f6b4280f2dca841186ba94062"} Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.402023 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" 
podUID="8af5979c-58ee-4098-9e8d-06cf64551372" containerName="nova-metadata-log" containerID="cri-o://d17bde560a161c590d16c775414c8b026346ff6ba9d571ae932cc2c175cfe60d" gracePeriod=30 Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.398497 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="b071159f-2eb5-44b8-90da-1b9605430aea" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://22766927fe02c87ef13b604aa23003e23b4c65e52b9aed4158cb19b4dd1cc14c" gracePeriod=30 Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.402040 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="8af5979c-58ee-4098-9e8d-06cf64551372" containerName="nova-metadata-metadata" containerID="cri-o://775b16999b8996c42b4cdd116a94792c5605bf06571301d6f6d0fa103fb514f4" gracePeriod=30 Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.403539 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8af5979c-58ee-4098-9e8d-06cf64551372","Type":"ContainerStarted","Data":"775b16999b8996c42b4cdd116a94792c5605bf06571301d6f6d0fa103fb514f4"} Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.403559 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8af5979c-58ee-4098-9e8d-06cf64551372","Type":"ContainerStarted","Data":"d17bde560a161c590d16c775414c8b026346ff6ba9d571ae932cc2c175cfe60d"} Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.404696 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6f0f9e33-5d64-4861-ae33-c6e01ca8af78","Type":"ContainerStarted","Data":"7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014"} Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.409542 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e38a2272-cb35-490e-8ea3-672050e88c8a","Type":"ContainerStarted","Data":"2e60a47f179a4f97dac0a9146bee6132afeceb8a758026ac1b3bb79b4fc2bbff"} Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.418738 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.9380207350000003 podStartE2EDuration="8.418718229s" podCreationTimestamp="2025-11-25 09:11:16 +0000 UTC" firstStartedPulling="2025-11-25 09:11:17.924673237 +0000 UTC m=+1338.050702800" lastFinishedPulling="2025-11-25 09:11:23.405370741 +0000 UTC m=+1343.531400294" observedRunningTime="2025-11-25 09:11:24.413936251 +0000 UTC m=+1344.539965814" watchObservedRunningTime="2025-11-25 09:11:24.418718229 +0000 UTC m=+1344.544747812" Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.443080 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.673473215 podStartE2EDuration="8.443060545s" podCreationTimestamp="2025-11-25 09:11:16 +0000 UTC" firstStartedPulling="2025-11-25 09:11:17.654662757 +0000 UTC m=+1337.780692320" lastFinishedPulling="2025-11-25 09:11:23.424250087 +0000 UTC m=+1343.550279650" observedRunningTime="2025-11-25 09:11:24.432672712 +0000 UTC m=+1344.558702285" watchObservedRunningTime="2025-11-25 09:11:24.443060545 +0000 UTC m=+1344.569090108" Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.460845 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" 
podStartSLOduration=2.655855888 podStartE2EDuration="8.460826046s" podCreationTimestamp="2025-11-25 09:11:16 +0000 UTC" firstStartedPulling="2025-11-25 09:11:17.6286696 +0000 UTC m=+1337.754699163" lastFinishedPulling="2025-11-25 09:11:23.433639748 +0000 UTC m=+1343.559669321" observedRunningTime="2025-11-25 09:11:24.44806779 +0000 UTC m=+1344.574097353" watchObservedRunningTime="2025-11-25 09:11:24.460826046 +0000 UTC m=+1344.586855599" Nov 25 09:11:24 crc kubenswrapper[4932]: I1125 09:11:24.478257 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.9771628 podStartE2EDuration="8.478236777s" podCreationTimestamp="2025-11-25 09:11:16 +0000 UTC" firstStartedPulling="2025-11-25 09:11:17.925493772 +0000 UTC m=+1338.051523335" lastFinishedPulling="2025-11-25 09:11:23.426567739 +0000 UTC m=+1343.552597312" observedRunningTime="2025-11-25 09:11:24.462840689 +0000 UTC m=+1344.588870252" watchObservedRunningTime="2025-11-25 09:11:24.478236777 +0000 UTC m=+1344.604266330" Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.422609 4932 generic.go:334] "Generic (PLEG): container finished" podID="8af5979c-58ee-4098-9e8d-06cf64551372" containerID="775b16999b8996c42b4cdd116a94792c5605bf06571301d6f6d0fa103fb514f4" exitCode=0 Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.422869 4932 generic.go:334] "Generic (PLEG): container finished" podID="8af5979c-58ee-4098-9e8d-06cf64551372" containerID="d17bde560a161c590d16c775414c8b026346ff6ba9d571ae932cc2c175cfe60d" exitCode=143 Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.422663 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8af5979c-58ee-4098-9e8d-06cf64551372","Type":"ContainerDied","Data":"775b16999b8996c42b4cdd116a94792c5605bf06571301d6f6d0fa103fb514f4"} Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.422930 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8af5979c-58ee-4098-9e8d-06cf64551372","Type":"ContainerDied","Data":"d17bde560a161c590d16c775414c8b026346ff6ba9d571ae932cc2c175cfe60d"} Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.424200 4932 generic.go:334] "Generic (PLEG): container finished" podID="a24ef146-76b6-4034-afce-fa2e2c94e641" containerID="756e2877e3a1a5bb86487a14267262b52d648ff3b7723eaf2254117cbadf4bb4" exitCode=0 Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.424259 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dnps4" event={"ID":"a24ef146-76b6-4034-afce-fa2e2c94e641","Type":"ContainerDied","Data":"756e2877e3a1a5bb86487a14267262b52d648ff3b7723eaf2254117cbadf4bb4"} Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.428046 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e38a2272-cb35-490e-8ea3-672050e88c8a","Type":"ContainerStarted","Data":"2206e31168db9bc10c8df760e4ae067016aaf211011534726aab5d8d2c224708"} Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.507120 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.595567 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8af5979c-58ee-4098-9e8d-06cf64551372-config-data\") pod \"8af5979c-58ee-4098-9e8d-06cf64551372\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.595795 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8af5979c-58ee-4098-9e8d-06cf64551372-combined-ca-bundle\") pod \"8af5979c-58ee-4098-9e8d-06cf64551372\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.595830 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8af5979c-58ee-4098-9e8d-06cf64551372-logs\") pod \"8af5979c-58ee-4098-9e8d-06cf64551372\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.595887 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqg66\" (UniqueName: \"kubernetes.io/projected/8af5979c-58ee-4098-9e8d-06cf64551372-kube-api-access-nqg66\") pod \"8af5979c-58ee-4098-9e8d-06cf64551372\" (UID: \"8af5979c-58ee-4098-9e8d-06cf64551372\") " Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.598076 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8af5979c-58ee-4098-9e8d-06cf64551372-logs" (OuterVolumeSpecName: "logs") pod "8af5979c-58ee-4098-9e8d-06cf64551372" (UID: "8af5979c-58ee-4098-9e8d-06cf64551372"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.601208 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8af5979c-58ee-4098-9e8d-06cf64551372-kube-api-access-nqg66" (OuterVolumeSpecName: "kube-api-access-nqg66") pod "8af5979c-58ee-4098-9e8d-06cf64551372" (UID: "8af5979c-58ee-4098-9e8d-06cf64551372"). InnerVolumeSpecName "kube-api-access-nqg66". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.630816 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8af5979c-58ee-4098-9e8d-06cf64551372-config-data" (OuterVolumeSpecName: "config-data") pod "8af5979c-58ee-4098-9e8d-06cf64551372" (UID: "8af5979c-58ee-4098-9e8d-06cf64551372"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.635867 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8af5979c-58ee-4098-9e8d-06cf64551372-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8af5979c-58ee-4098-9e8d-06cf64551372" (UID: "8af5979c-58ee-4098-9e8d-06cf64551372"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.698578 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8af5979c-58ee-4098-9e8d-06cf64551372-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.698615 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqg66\" (UniqueName: \"kubernetes.io/projected/8af5979c-58ee-4098-9e8d-06cf64551372-kube-api-access-nqg66\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.698631 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8af5979c-58ee-4098-9e8d-06cf64551372-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.698644 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8af5979c-58ee-4098-9e8d-06cf64551372-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:25 crc kubenswrapper[4932]: I1125 09:11:25.980536 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.438718 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.438722 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"8af5979c-58ee-4098-9e8d-06cf64551372","Type":"ContainerDied","Data":"42cc2fa00ddc062fc7b01ae162405a4a1ccdbaeb76aceba8b4d0a5fc00d6ac34"} Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.438890 4932 scope.go:117] "RemoveContainer" containerID="775b16999b8996c42b4cdd116a94792c5605bf06571301d6f6d0fa103fb514f4" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.442848 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e38a2272-cb35-490e-8ea3-672050e88c8a","Type":"ContainerStarted","Data":"d27e09fff1e0ff3c7fb6502dffa6adf6f46a0fb70748ecf30e635dfb400d5c61"} Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.472135 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.486890 4932 scope.go:117] "RemoveContainer" containerID="d17bde560a161c590d16c775414c8b026346ff6ba9d571ae932cc2c175cfe60d" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.487736 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.502289 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:11:26 crc kubenswrapper[4932]: E1125 09:11:26.502772 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8af5979c-58ee-4098-9e8d-06cf64551372" containerName="nova-metadata-log" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.502793 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8af5979c-58ee-4098-9e8d-06cf64551372" containerName="nova-metadata-log" Nov 25 09:11:26 crc kubenswrapper[4932]: E1125 09:11:26.502817 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8af5979c-58ee-4098-9e8d-06cf64551372" containerName="nova-metadata-metadata" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.502825 4932 
state_mem.go:107] "Deleted CPUSet assignment" podUID="8af5979c-58ee-4098-9e8d-06cf64551372" containerName="nova-metadata-metadata" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.503061 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="8af5979c-58ee-4098-9e8d-06cf64551372" containerName="nova-metadata-metadata" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.503079 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="8af5979c-58ee-4098-9e8d-06cf64551372" containerName="nova-metadata-log" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.504053 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.506141 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.507936 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.525662 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.616494 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.616547 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e60012a-8635-47b7-bd0f-05177a5e4714-logs\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.616667 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.616705 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-config-data\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.616845 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc2g2\" (UniqueName: \"kubernetes.io/projected/9e60012a-8635-47b7-bd0f-05177a5e4714-kube-api-access-hc2g2\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.639211 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8af5979c-58ee-4098-9e8d-06cf64551372" path="/var/lib/kubelet/pods/8af5979c-58ee-4098-9e8d-06cf64551372/volumes" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.718017 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-config-data\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.718853 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc2g2\" (UniqueName: \"kubernetes.io/projected/9e60012a-8635-47b7-bd0f-05177a5e4714-kube-api-access-hc2g2\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.718886 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.718907 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e60012a-8635-47b7-bd0f-05177a5e4714-logs\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.719066 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.720516 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e60012a-8635-47b7-bd0f-05177a5e4714-logs\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.724743 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.724805 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.726739 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-config-data\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.735897 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc2g2\" (UniqueName: \"kubernetes.io/projected/9e60012a-8635-47b7-bd0f-05177a5e4714-kube-api-access-hc2g2\") pod \"nova-metadata-0\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") " pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.831700 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.923536 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.923903 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 09:11:26 crc kubenswrapper[4932]: I1125 09:11:26.991845 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.130629 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-config-data\") pod \"a24ef146-76b6-4034-afce-fa2e2c94e641\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.131107 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-combined-ca-bundle\") pod \"a24ef146-76b6-4034-afce-fa2e2c94e641\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.131153 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcpl8\" (UniqueName: \"kubernetes.io/projected/a24ef146-76b6-4034-afce-fa2e2c94e641-kube-api-access-zcpl8\") pod \"a24ef146-76b6-4034-afce-fa2e2c94e641\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.131283 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-scripts\") pod \"a24ef146-76b6-4034-afce-fa2e2c94e641\" (UID: \"a24ef146-76b6-4034-afce-fa2e2c94e641\") " Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.140119 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-scripts" (OuterVolumeSpecName: "scripts") pod "a24ef146-76b6-4034-afce-fa2e2c94e641" (UID: "a24ef146-76b6-4034-afce-fa2e2c94e641"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.140386 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a24ef146-76b6-4034-afce-fa2e2c94e641-kube-api-access-zcpl8" (OuterVolumeSpecName: "kube-api-access-zcpl8") pod "a24ef146-76b6-4034-afce-fa2e2c94e641" (UID: "a24ef146-76b6-4034-afce-fa2e2c94e641"). InnerVolumeSpecName "kube-api-access-zcpl8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.162608 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-config-data" (OuterVolumeSpecName: "config-data") pod "a24ef146-76b6-4034-afce-fa2e2c94e641" (UID: "a24ef146-76b6-4034-afce-fa2e2c94e641"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.166310 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a24ef146-76b6-4034-afce-fa2e2c94e641" (UID: "a24ef146-76b6-4034-afce-fa2e2c94e641"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.234339 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.234375 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.234390 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zcpl8\" (UniqueName: \"kubernetes.io/projected/a24ef146-76b6-4034-afce-fa2e2c94e641-kube-api-access-zcpl8\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.234400 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a24ef146-76b6-4034-afce-fa2e2c94e641-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.294477 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.315487 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.315580 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.322365 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.395138 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:11:27 crc kubenswrapper[4932]: W1125 09:11:27.404861 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e60012a_8635_47b7_bd0f_05177a5e4714.slice/crio-f912f4fde38432ef7cf177a2b083d9526f0f3fef4c270bd77928777d8308d283 WatchSource:0}: Error finding container f912f4fde38432ef7cf177a2b083d9526f0f3fef4c270bd77928777d8308d283: Status 404 returned error can't find the container with id f912f4fde38432ef7cf177a2b083d9526f0f3fef4c270bd77928777d8308d283 Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.405222 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.408159 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-fqnsw"] Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.491732 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"e38a2272-cb35-490e-8ea3-672050e88c8a","Type":"ContainerStarted","Data":"25d801f8101fc6eb6c2551500a95854455471e5066d6bc9c4d2efbdd92226c45"} Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.492113 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.499473 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9e60012a-8635-47b7-bd0f-05177a5e4714","Type":"ContainerStarted","Data":"f912f4fde38432ef7cf177a2b083d9526f0f3fef4c270bd77928777d8308d283"} Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.514512 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dnps4" event={"ID":"a24ef146-76b6-4034-afce-fa2e2c94e641","Type":"ContainerDied","Data":"a8dc641b731782387afcc59b08aa43ec34b1ed7653e08f7330974fad72d5ae2f"} Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.514551 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a8dc641b731782387afcc59b08aa43ec34b1ed7653e08f7330974fad72d5ae2f" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.514512 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" podUID="3757d7e1-b11f-4e98-964a-611d24f165af" containerName="dnsmasq-dns" containerID="cri-o://900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb" gracePeriod=10 Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.514588 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dnps4" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.535473 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=5.040370838 podStartE2EDuration="8.535454428s" podCreationTimestamp="2025-11-25 09:11:19 +0000 UTC" firstStartedPulling="2025-11-25 09:11:23.391389817 +0000 UTC m=+1343.517419370" lastFinishedPulling="2025-11-25 09:11:26.886473397 +0000 UTC m=+1347.012502960" observedRunningTime="2025-11-25 09:11:27.519601626 +0000 UTC m=+1347.645631199" watchObservedRunningTime="2025-11-25 09:11:27.535454428 +0000 UTC m=+1347.661483991" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.583076 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.647325 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.661217 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.661503 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b5f70393-a9e1-4c5b-8729-81051eb638b0" containerName="nova-api-log" containerID="cri-o://747180a5ae1996b3690d4c5f6d543d60fd5f0c5f6b4280f2dca841186ba94062" gracePeriod=30 Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.661993 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b5f70393-a9e1-4c5b-8729-81051eb638b0" containerName="nova-api-api" containerID="cri-o://94e26657b4459559c5225a5cdc43af92ec60a964e5e3ba7b4573d1374940e49a" gracePeriod=30 Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.674347 4932 prober.go:107] "Probe 
failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b5f70393-a9e1-4c5b-8729-81051eb638b0" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.189:8774/\": EOF" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.674351 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="b5f70393-a9e1-4c5b-8729-81051eb638b0" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.189:8774/\": EOF" Nov 25 09:11:27 crc kubenswrapper[4932]: I1125 09:11:27.681989 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.167913 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.368091 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44jmn\" (UniqueName: \"kubernetes.io/projected/3757d7e1-b11f-4e98-964a-611d24f165af-kube-api-access-44jmn\") pod \"3757d7e1-b11f-4e98-964a-611d24f165af\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.368246 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-dns-swift-storage-0\") pod \"3757d7e1-b11f-4e98-964a-611d24f165af\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.368379 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-ovsdbserver-nb\") pod \"3757d7e1-b11f-4e98-964a-611d24f165af\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.368402 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-config\") pod \"3757d7e1-b11f-4e98-964a-611d24f165af\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.368418 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-ovsdbserver-sb\") pod \"3757d7e1-b11f-4e98-964a-611d24f165af\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.368460 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-dns-svc\") pod \"3757d7e1-b11f-4e98-964a-611d24f165af\" (UID: \"3757d7e1-b11f-4e98-964a-611d24f165af\") " Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.373711 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3757d7e1-b11f-4e98-964a-611d24f165af-kube-api-access-44jmn" (OuterVolumeSpecName: "kube-api-access-44jmn") pod "3757d7e1-b11f-4e98-964a-611d24f165af" (UID: "3757d7e1-b11f-4e98-964a-611d24f165af"). InnerVolumeSpecName "kube-api-access-44jmn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.422569 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3757d7e1-b11f-4e98-964a-611d24f165af" (UID: "3757d7e1-b11f-4e98-964a-611d24f165af"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.423659 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3757d7e1-b11f-4e98-964a-611d24f165af" (UID: "3757d7e1-b11f-4e98-964a-611d24f165af"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.427802 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-config" (OuterVolumeSpecName: "config") pod "3757d7e1-b11f-4e98-964a-611d24f165af" (UID: "3757d7e1-b11f-4e98-964a-611d24f165af"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.433557 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3757d7e1-b11f-4e98-964a-611d24f165af" (UID: "3757d7e1-b11f-4e98-964a-611d24f165af"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.439699 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3757d7e1-b11f-4e98-964a-611d24f165af" (UID: "3757d7e1-b11f-4e98-964a-611d24f165af"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.470359 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.470402 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.470413 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.470423 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.470434 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44jmn\" (UniqueName: \"kubernetes.io/projected/3757d7e1-b11f-4e98-964a-611d24f165af-kube-api-access-44jmn\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.470446 4932 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3757d7e1-b11f-4e98-964a-611d24f165af-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.524888 4932 generic.go:334] "Generic (PLEG): container finished" podID="3757d7e1-b11f-4e98-964a-611d24f165af" containerID="900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb" exitCode=0 Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.524966 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" event={"ID":"3757d7e1-b11f-4e98-964a-611d24f165af","Type":"ContainerDied","Data":"900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb"} Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.524997 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw" event={"ID":"3757d7e1-b11f-4e98-964a-611d24f165af","Type":"ContainerDied","Data":"13f8d83b92cb9ac1655c7b7a9d850cb3f9fa765fad7ff81e8b0dfa549d849ffa"} Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.525017 4932 scope.go:117] "RemoveContainer" containerID="900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb" Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.525202 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7965876c4f-fqnsw"
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.531574 4932 generic.go:334] "Generic (PLEG): container finished" podID="b5f70393-a9e1-4c5b-8729-81051eb638b0" containerID="747180a5ae1996b3690d4c5f6d543d60fd5f0c5f6b4280f2dca841186ba94062" exitCode=143
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.531638 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b5f70393-a9e1-4c5b-8729-81051eb638b0","Type":"ContainerDied","Data":"747180a5ae1996b3690d4c5f6d543d60fd5f0c5f6b4280f2dca841186ba94062"}
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.536447 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9e60012a-8635-47b7-bd0f-05177a5e4714" containerName="nova-metadata-log" containerID="cri-o://a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1" gracePeriod=30
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.536523 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9e60012a-8635-47b7-bd0f-05177a5e4714","Type":"ContainerStarted","Data":"a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b"}
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.536546 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9e60012a-8635-47b7-bd0f-05177a5e4714","Type":"ContainerStarted","Data":"a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1"}
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.537547 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="9e60012a-8635-47b7-bd0f-05177a5e4714" containerName="nova-metadata-metadata" containerID="cri-o://a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b" gracePeriod=30
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.575526 4932 scope.go:117] "RemoveContainer" containerID="294edcdf59f6d0ce14f428296d0ac17e2f9df4e6d4cfbbe61523a9bd88effc0d"
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.591233 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.591210874 podStartE2EDuration="2.591210874s" podCreationTimestamp="2025-11-25 09:11:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:11:28.575103485 +0000 UTC m=+1348.701133068" watchObservedRunningTime="2025-11-25 09:11:28.591210874 +0000 UTC m=+1348.717240447"
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.603942 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-fqnsw"]
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.610567 4932 scope.go:117] "RemoveContainer" containerID="900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb"
Nov 25 09:11:28 crc kubenswrapper[4932]: E1125 09:11:28.611097 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb\": container with ID starting with 900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb not found: ID does not exist" containerID="900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb"
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.611320 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb"} err="failed to get container status \"900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb\": rpc error: code = NotFound desc = could not find container \"900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb\": container with ID starting with 900e9081eb5d1046ee5503ecf45ab20d377a84abbba147668a92b1d7101d6ffb not found: ID does not exist"
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.611339 4932 scope.go:117] "RemoveContainer" containerID="294edcdf59f6d0ce14f428296d0ac17e2f9df4e6d4cfbbe61523a9bd88effc0d"
Nov 25 09:11:28 crc kubenswrapper[4932]: E1125 09:11:28.612319 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"294edcdf59f6d0ce14f428296d0ac17e2f9df4e6d4cfbbe61523a9bd88effc0d\": container with ID starting with 294edcdf59f6d0ce14f428296d0ac17e2f9df4e6d4cfbbe61523a9bd88effc0d not found: ID does not exist" containerID="294edcdf59f6d0ce14f428296d0ac17e2f9df4e6d4cfbbe61523a9bd88effc0d"
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.612347 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"294edcdf59f6d0ce14f428296d0ac17e2f9df4e6d4cfbbe61523a9bd88effc0d"} err="failed to get container status \"294edcdf59f6d0ce14f428296d0ac17e2f9df4e6d4cfbbe61523a9bd88effc0d\": rpc error: code = NotFound desc = could not find container \"294edcdf59f6d0ce14f428296d0ac17e2f9df4e6d4cfbbe61523a9bd88effc0d\": container with ID starting with 294edcdf59f6d0ce14f428296d0ac17e2f9df4e6d4cfbbe61523a9bd88effc0d not found: ID does not exist"
Nov 25 09:11:28 crc kubenswrapper[4932]: I1125 09:11:28.622553 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-fqnsw"]
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.113154 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.289062 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hc2g2\" (UniqueName: \"kubernetes.io/projected/9e60012a-8635-47b7-bd0f-05177a5e4714-kube-api-access-hc2g2\") pod \"9e60012a-8635-47b7-bd0f-05177a5e4714\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") "
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.289129 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-combined-ca-bundle\") pod \"9e60012a-8635-47b7-bd0f-05177a5e4714\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") "
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.289300 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-nova-metadata-tls-certs\") pod \"9e60012a-8635-47b7-bd0f-05177a5e4714\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") "
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.289327 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-config-data\") pod \"9e60012a-8635-47b7-bd0f-05177a5e4714\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") "
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.289359 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e60012a-8635-47b7-bd0f-05177a5e4714-logs\") pod \"9e60012a-8635-47b7-bd0f-05177a5e4714\" (UID: \"9e60012a-8635-47b7-bd0f-05177a5e4714\") "
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.290055 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e60012a-8635-47b7-bd0f-05177a5e4714-logs" (OuterVolumeSpecName: "logs") pod "9e60012a-8635-47b7-bd0f-05177a5e4714" (UID: "9e60012a-8635-47b7-bd0f-05177a5e4714"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.294102 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e60012a-8635-47b7-bd0f-05177a5e4714-kube-api-access-hc2g2" (OuterVolumeSpecName: "kube-api-access-hc2g2") pod "9e60012a-8635-47b7-bd0f-05177a5e4714" (UID: "9e60012a-8635-47b7-bd0f-05177a5e4714"). InnerVolumeSpecName "kube-api-access-hc2g2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.319307 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-config-data" (OuterVolumeSpecName: "config-data") pod "9e60012a-8635-47b7-bd0f-05177a5e4714" (UID: "9e60012a-8635-47b7-bd0f-05177a5e4714"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.322573 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9e60012a-8635-47b7-bd0f-05177a5e4714" (UID: "9e60012a-8635-47b7-bd0f-05177a5e4714"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.358357 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "9e60012a-8635-47b7-bd0f-05177a5e4714" (UID: "9e60012a-8635-47b7-bd0f-05177a5e4714"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.391761 4932 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.391791 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.391802 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e60012a-8635-47b7-bd0f-05177a5e4714-logs\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.391810 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hc2g2\" (UniqueName: \"kubernetes.io/projected/9e60012a-8635-47b7-bd0f-05177a5e4714-kube-api-access-hc2g2\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.391818 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e60012a-8635-47b7-bd0f-05177a5e4714-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.548300 4932 generic.go:334] "Generic (PLEG): container finished" podID="92f23c40-bf12-4901-8f08-5d306bab0cef" containerID="3445d785af0441b8b567d57c03e6835b2bfed12872a3896dad8940d2a438528d" exitCode=0
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.548366 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-ln6w4" event={"ID":"92f23c40-bf12-4901-8f08-5d306bab0cef","Type":"ContainerDied","Data":"3445d785af0441b8b567d57c03e6835b2bfed12872a3896dad8940d2a438528d"}
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.551503 4932 generic.go:334] "Generic (PLEG): container finished" podID="9e60012a-8635-47b7-bd0f-05177a5e4714" containerID="a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b" exitCode=0
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.551640 4932 generic.go:334] "Generic (PLEG): container finished" podID="9e60012a-8635-47b7-bd0f-05177a5e4714" containerID="a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1" exitCode=143
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.551853 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="6f0f9e33-5d64-4861-ae33-c6e01ca8af78" containerName="nova-scheduler-scheduler" containerID="cri-o://7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014" gracePeriod=30
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.552214 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.563304 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9e60012a-8635-47b7-bd0f-05177a5e4714","Type":"ContainerDied","Data":"a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b"}
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.563370 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9e60012a-8635-47b7-bd0f-05177a5e4714","Type":"ContainerDied","Data":"a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1"}
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.563385 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"9e60012a-8635-47b7-bd0f-05177a5e4714","Type":"ContainerDied","Data":"f912f4fde38432ef7cf177a2b083d9526f0f3fef4c270bd77928777d8308d283"}
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.563405 4932 scope.go:117] "RemoveContainer" containerID="a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.610557 4932 scope.go:117] "RemoveContainer" containerID="a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.612791 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.630169 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.640350 4932 scope.go:117] "RemoveContainer" containerID="a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b"
Nov 25 09:11:29 crc kubenswrapper[4932]: E1125 09:11:29.641147 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b\": container with ID starting with a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b not found: ID does not exist" containerID="a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.641201 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b"} err="failed to get container status \"a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b\": rpc error: code = NotFound desc = could not find container \"a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b\": container with ID starting with a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b not found: ID does not exist"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.641226 4932 scope.go:117] "RemoveContainer" containerID="a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1"
Nov 25 09:11:29 crc kubenswrapper[4932]: E1125 09:11:29.641827 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1\": container with ID starting with a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1 not found: ID does not exist" containerID="a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.641851 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1"} err="failed to get container status \"a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1\": rpc error: code = NotFound desc = could not find container \"a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1\": container with ID starting with a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1 not found: ID does not exist"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.641867 4932 scope.go:117] "RemoveContainer" containerID="a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.642312 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b"} err="failed to get container status \"a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b\": rpc error: code = NotFound desc = could not find container \"a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b\": container with ID starting with a59e858b394d643cf9dc4a0843fbecc7fec786aaba5078a6b3ef940e692ad80b not found: ID does not exist"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.642332 4932 scope.go:117] "RemoveContainer" containerID="a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.643296 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1"} err="failed to get container status \"a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1\": rpc error: code = NotFound desc = could not find container \"a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1\": container with ID starting with a16f76ef7befd26a4543cd409cf7fa33f0d1551b60a6a149ce069b31175442c1 not found: ID does not exist"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.644845 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 09:11:29 crc kubenswrapper[4932]: E1125 09:11:29.645398 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3757d7e1-b11f-4e98-964a-611d24f165af" containerName="dnsmasq-dns"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.645423 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3757d7e1-b11f-4e98-964a-611d24f165af" containerName="dnsmasq-dns"
Nov 25 09:11:29 crc kubenswrapper[4932]: E1125 09:11:29.645441 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3757d7e1-b11f-4e98-964a-611d24f165af" containerName="init"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.645448 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3757d7e1-b11f-4e98-964a-611d24f165af" containerName="init"
Nov 25 09:11:29 crc kubenswrapper[4932]: E1125 09:11:29.645468 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e60012a-8635-47b7-bd0f-05177a5e4714" containerName="nova-metadata-metadata"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.645476 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e60012a-8635-47b7-bd0f-05177a5e4714" containerName="nova-metadata-metadata"
Nov 25 09:11:29 crc kubenswrapper[4932]: E1125 09:11:29.645501 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a24ef146-76b6-4034-afce-fa2e2c94e641" containerName="nova-manage"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.645510 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a24ef146-76b6-4034-afce-fa2e2c94e641" containerName="nova-manage"
Nov 25 09:11:29 crc kubenswrapper[4932]: E1125 09:11:29.645534 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e60012a-8635-47b7-bd0f-05177a5e4714" containerName="nova-metadata-log"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.645542 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e60012a-8635-47b7-bd0f-05177a5e4714" containerName="nova-metadata-log"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.645809 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="3757d7e1-b11f-4e98-964a-611d24f165af" containerName="dnsmasq-dns"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.645831 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e60012a-8635-47b7-bd0f-05177a5e4714" containerName="nova-metadata-metadata"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.645851 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e60012a-8635-47b7-bd0f-05177a5e4714" containerName="nova-metadata-log"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.645864 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a24ef146-76b6-4034-afce-fa2e2c94e641" containerName="nova-manage"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.647209 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.651055 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.655667 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.663256 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.799006 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-config-data\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.799453 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a85938f2-0bc7-42f6-9b98-ceee092a8b19-logs\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.799561 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.799712 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9vwn\" (UniqueName: \"kubernetes.io/projected/a85938f2-0bc7-42f6-9b98-ceee092a8b19-kube-api-access-f9vwn\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.799765 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.900949 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-config-data\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.901006 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a85938f2-0bc7-42f6-9b98-ceee092a8b19-logs\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.901105 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.901251 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9vwn\" (UniqueName: \"kubernetes.io/projected/a85938f2-0bc7-42f6-9b98-ceee092a8b19-kube-api-access-f9vwn\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.901305 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.901733 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a85938f2-0bc7-42f6-9b98-ceee092a8b19-logs\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.904889 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.912955 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-config-data\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.913965 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.921951 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9vwn\" (UniqueName: \"kubernetes.io/projected/a85938f2-0bc7-42f6-9b98-ceee092a8b19-kube-api-access-f9vwn\") pod \"nova-metadata-0\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " pod="openstack/nova-metadata-0"
Nov 25 09:11:29 crc kubenswrapper[4932]: I1125 09:11:29.983400 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 09:11:30 crc kubenswrapper[4932]: I1125 09:11:30.441893 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 09:11:30 crc kubenswrapper[4932]: W1125 09:11:30.444376 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda85938f2_0bc7_42f6_9b98_ceee092a8b19.slice/crio-174d597657371d0c0a031d32db846e277201febb711caebca0b962c3ec5d9d00 WatchSource:0}: Error finding container 174d597657371d0c0a031d32db846e277201febb711caebca0b962c3ec5d9d00: Status 404 returned error can't find the container with id 174d597657371d0c0a031d32db846e277201febb711caebca0b962c3ec5d9d00
Nov 25 09:11:30 crc kubenswrapper[4932]: I1125 09:11:30.562398 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a85938f2-0bc7-42f6-9b98-ceee092a8b19","Type":"ContainerStarted","Data":"174d597657371d0c0a031d32db846e277201febb711caebca0b962c3ec5d9d00"}
Nov 25 09:11:30 crc kubenswrapper[4932]: I1125 09:11:30.625119 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3757d7e1-b11f-4e98-964a-611d24f165af" path="/var/lib/kubelet/pods/3757d7e1-b11f-4e98-964a-611d24f165af/volumes"
Nov 25 09:11:30 crc kubenswrapper[4932]: I1125 09:11:30.626301 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e60012a-8635-47b7-bd0f-05177a5e4714" path="/var/lib/kubelet/pods/9e60012a-8635-47b7-bd0f-05177a5e4714/volumes"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.017020 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-ln6w4"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.040552 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-combined-ca-bundle\") pod \"92f23c40-bf12-4901-8f08-5d306bab0cef\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") "
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.040684 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-scripts\") pod \"92f23c40-bf12-4901-8f08-5d306bab0cef\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") "
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.040713 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-config-data\") pod \"92f23c40-bf12-4901-8f08-5d306bab0cef\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") "
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.040731 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2n6f\" (UniqueName: \"kubernetes.io/projected/92f23c40-bf12-4901-8f08-5d306bab0cef-kube-api-access-t2n6f\") pod \"92f23c40-bf12-4901-8f08-5d306bab0cef\" (UID: \"92f23c40-bf12-4901-8f08-5d306bab0cef\") "
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.048065 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92f23c40-bf12-4901-8f08-5d306bab0cef-kube-api-access-t2n6f" (OuterVolumeSpecName: "kube-api-access-t2n6f") pod "92f23c40-bf12-4901-8f08-5d306bab0cef" (UID: "92f23c40-bf12-4901-8f08-5d306bab0cef"). InnerVolumeSpecName "kube-api-access-t2n6f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.055324 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-scripts" (OuterVolumeSpecName: "scripts") pod "92f23c40-bf12-4901-8f08-5d306bab0cef" (UID: "92f23c40-bf12-4901-8f08-5d306bab0cef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.113105 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "92f23c40-bf12-4901-8f08-5d306bab0cef" (UID: "92f23c40-bf12-4901-8f08-5d306bab0cef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.120787 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-config-data" (OuterVolumeSpecName: "config-data") pod "92f23c40-bf12-4901-8f08-5d306bab0cef" (UID: "92f23c40-bf12-4901-8f08-5d306bab0cef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.143219 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.143246 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2n6f\" (UniqueName: \"kubernetes.io/projected/92f23c40-bf12-4901-8f08-5d306bab0cef-kube-api-access-t2n6f\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.143257 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.143267 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/92f23c40-bf12-4901-8f08-5d306bab0cef-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.463849 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.573821 4932 generic.go:334] "Generic (PLEG): container finished" podID="6f0f9e33-5d64-4861-ae33-c6e01ca8af78" containerID="7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014" exitCode=0
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.573884 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.573905 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6f0f9e33-5d64-4861-ae33-c6e01ca8af78","Type":"ContainerDied","Data":"7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014"}
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.573933 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6f0f9e33-5d64-4861-ae33-c6e01ca8af78","Type":"ContainerDied","Data":"ded55ae0e220180211aba9b0c0c19c1ae4bbe2b11f730949917a0e2cd04fd273"}
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.573962 4932 scope.go:117] "RemoveContainer" containerID="7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.577218 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a85938f2-0bc7-42f6-9b98-ceee092a8b19","Type":"ContainerStarted","Data":"acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73"}
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.577265 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a85938f2-0bc7-42f6-9b98-ceee092a8b19","Type":"ContainerStarted","Data":"105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0"}
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.578975 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-ln6w4" event={"ID":"92f23c40-bf12-4901-8f08-5d306bab0cef","Type":"ContainerDied","Data":"46d965acc860bbaa95da783f3474538a6b5000da2dc9e824828e6aefd4b4407e"}
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.579008 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="46d965acc860bbaa95da783f3474538a6b5000da2dc9e824828e6aefd4b4407e"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.579031 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-ln6w4"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.596016 4932 scope.go:117] "RemoveContainer" containerID="7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014"
Nov 25 09:11:31 crc kubenswrapper[4932]: E1125 09:11:31.596699 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014\": container with ID starting with 7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014 not found: ID does not exist" containerID="7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.596737 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014"} err="failed to get container status \"7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014\": rpc error: code = NotFound desc = could not find container \"7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014\": container with ID starting with 7339daf891e8b37c99d5937b0179890283d5d88a456319bc20d14a3d48500014 not found: ID does not exist"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.620433 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.620406796 podStartE2EDuration="2.620406796s" podCreationTimestamp="2025-11-25 09:11:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:11:31.609487607 +0000 UTC m=+1351.735517170" watchObservedRunningTime="2025-11-25 09:11:31.620406796 +0000 UTC m=+1351.746436369"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.652799 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-combined-ca-bundle\") pod \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\" (UID: \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\") "
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.652985 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-config-data\") pod \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\" (UID: \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\") "
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.653114 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqm2z\" (UniqueName: \"kubernetes.io/projected/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-kube-api-access-pqm2z\") pod \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\" (UID: \"6f0f9e33-5d64-4861-ae33-c6e01ca8af78\") "
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.660455 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-kube-api-access-pqm2z" (OuterVolumeSpecName: "kube-api-access-pqm2z") pod "6f0f9e33-5d64-4861-ae33-c6e01ca8af78" (UID: "6f0f9e33-5d64-4861-ae33-c6e01ca8af78"). InnerVolumeSpecName "kube-api-access-pqm2z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.690469 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-config-data" (OuterVolumeSpecName: "config-data") pod "6f0f9e33-5d64-4861-ae33-c6e01ca8af78" (UID: "6f0f9e33-5d64-4861-ae33-c6e01ca8af78"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.708324 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 25 09:11:31 crc kubenswrapper[4932]: E1125 09:11:31.708780 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f0f9e33-5d64-4861-ae33-c6e01ca8af78" containerName="nova-scheduler-scheduler"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.708804 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f0f9e33-5d64-4861-ae33-c6e01ca8af78" containerName="nova-scheduler-scheduler"
Nov 25 09:11:31 crc kubenswrapper[4932]: E1125 09:11:31.708837 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92f23c40-bf12-4901-8f08-5d306bab0cef" containerName="nova-cell1-conductor-db-sync"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.708847 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="92f23c40-bf12-4901-8f08-5d306bab0cef" containerName="nova-cell1-conductor-db-sync"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.709084 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f0f9e33-5d64-4861-ae33-c6e01ca8af78" containerName="nova-scheduler-scheduler"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.709115 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="92f23c40-bf12-4901-8f08-5d306bab0cef" containerName="nova-cell1-conductor-db-sync"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.709900 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.711252 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6f0f9e33-5d64-4861-ae33-c6e01ca8af78" (UID: "6f0f9e33-5d64-4861-ae33-c6e01ca8af78"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.721434 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.756992 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.757024 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqm2z\" (UniqueName: \"kubernetes.io/projected/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-kube-api-access-pqm2z\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.757037 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f0f9e33-5d64-4861-ae33-c6e01ca8af78-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.787351 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.858427 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znt4v\" (UniqueName: \"kubernetes.io/projected/d586a3b8-c6b8-4c6e-aa6f-11797966d218-kube-api-access-znt4v\") pod \"nova-cell1-conductor-0\" (UID: \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.858540 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d586a3b8-c6b8-4c6e-aa6f-11797966d218-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.858615 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d586a3b8-c6b8-4c6e-aa6f-11797966d218-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.903851 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.912420 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.932104 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.933499 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.937041 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.946045 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.960818 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znt4v\" (UniqueName: \"kubernetes.io/projected/d586a3b8-c6b8-4c6e-aa6f-11797966d218-kube-api-access-znt4v\") pod \"nova-cell1-conductor-0\" (UID: \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.960909 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d586a3b8-c6b8-4c6e-aa6f-11797966d218-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.960974 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d586a3b8-c6b8-4c6e-aa6f-11797966d218-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.977393 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d586a3b8-c6b8-4c6e-aa6f-11797966d218-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.983420 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d586a3b8-c6b8-4c6e-aa6f-11797966d218-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:31 crc kubenswrapper[4932]: I1125 09:11:31.991743 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znt4v\" (UniqueName: \"kubernetes.io/projected/d586a3b8-c6b8-4c6e-aa6f-11797966d218-kube-api-access-znt4v\") pod \"nova-cell1-conductor-0\" (UID: \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\") " pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.043111 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.063170 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-config-data\") pod \"nova-scheduler-0\" (UID: \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\") " pod="openstack/nova-scheduler-0"
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.063275 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pc9gj\" (UniqueName: \"kubernetes.io/projected/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-kube-api-access-pc9gj\") pod \"nova-scheduler-0\" (UID: \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\") " pod="openstack/nova-scheduler-0"
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.063573 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\") " pod="openstack/nova-scheduler-0"
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.166213 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\") " pod="openstack/nova-scheduler-0"
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.166425 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-config-data\") pod \"nova-scheduler-0\" (UID: \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\") " pod="openstack/nova-scheduler-0"
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.166459 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pc9gj\" (UniqueName: \"kubernetes.io/projected/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-kube-api-access-pc9gj\") pod \"nova-scheduler-0\" (UID: \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\") " pod="openstack/nova-scheduler-0"
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.173589 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-config-data\") pod \"nova-scheduler-0\" (UID: \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\") " pod="openstack/nova-scheduler-0"
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.176074 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\") " pod="openstack/nova-scheduler-0"
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.195970 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pc9gj\" (UniqueName: \"kubernetes.io/projected/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-kube-api-access-pc9gj\") pod \"nova-scheduler-0\" (UID: \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\") " pod="openstack/nova-scheduler-0"
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.248810 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.575608 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.618848 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f0f9e33-5d64-4861-ae33-c6e01ca8af78" path="/var/lib/kubelet/pods/6f0f9e33-5d64-4861-ae33-c6e01ca8af78/volumes"
Nov 25 09:11:32 crc kubenswrapper[4932]: W1125 09:11:32.749345 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac2f09cf_97e0_4446_9fc6_04bd3ffba71b.slice/crio-5d69eacfad374e39bf33d92e3276835c1d00c339cf257bab0a5bb120f4093378 WatchSource:0}: Error finding container 5d69eacfad374e39bf33d92e3276835c1d00c339cf257bab0a5bb120f4093378: Status 404 returned error can't find the container with id 5d69eacfad374e39bf33d92e3276835c1d00c339cf257bab0a5bb120f4093378
Nov 25 09:11:32 crc kubenswrapper[4932]: I1125 09:11:32.761925 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 09:11:33 crc kubenswrapper[4932]: I1125 09:11:33.603419 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b","Type":"ContainerStarted","Data":"2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf"}
Nov 25 09:11:33 crc kubenswrapper[4932]: I1125 09:11:33.603765 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b","Type":"ContainerStarted","Data":"5d69eacfad374e39bf33d92e3276835c1d00c339cf257bab0a5bb120f4093378"}
Nov 25 09:11:33 crc kubenswrapper[4932]: I1125 09:11:33.605779 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"d586a3b8-c6b8-4c6e-aa6f-11797966d218","Type":"ContainerStarted","Data":"9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863"}
Nov 25 09:11:33 crc kubenswrapper[4932]: I1125 09:11:33.605830 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"d586a3b8-c6b8-4c6e-aa6f-11797966d218","Type":"ContainerStarted","Data":"2e1d2da76a36d06943a2765c3b2ee678c59cdcdf2b414703bff0876df2fc7415"}
Nov 25 09:11:33 crc kubenswrapper[4932]: I1125 09:11:33.606051 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:33 crc kubenswrapper[4932]: I1125 09:11:33.629855 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.629836029 podStartE2EDuration="2.629836029s" podCreationTimestamp="2025-11-25 09:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:11:33.629742116 +0000 UTC m=+1353.755771689" watchObservedRunningTime="2025-11-25 09:11:33.629836029 +0000 UTC m=+1353.755865592"
Nov 25 09:11:33 crc kubenswrapper[4932]: I1125 09:11:33.656925 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.656905879 podStartE2EDuration="2.656905879s" podCreationTimestamp="2025-11-25 09:11:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:11:33.647675643 +0000 UTC m=+1353.773705206" watchObservedRunningTime="2025-11-25 09:11:33.656905879 +0000 UTC m=+1353.782935462"
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.617215 4932 generic.go:334] "Generic (PLEG): container finished" podID="b5f70393-a9e1-4c5b-8729-81051eb638b0" containerID="94e26657b4459559c5225a5cdc43af92ec60a964e5e3ba7b4573d1374940e49a" exitCode=0
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.618846 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b5f70393-a9e1-4c5b-8729-81051eb638b0","Type":"ContainerDied","Data":"94e26657b4459559c5225a5cdc43af92ec60a964e5e3ba7b4573d1374940e49a"}
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.618885 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b5f70393-a9e1-4c5b-8729-81051eb638b0","Type":"ContainerDied","Data":"43c4b6ebe40ccf68a9e372a3c33793d6a9d8bae4f1164473836c3f226fa77595"}
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.618897 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="43c4b6ebe40ccf68a9e372a3c33793d6a9d8bae4f1164473836c3f226fa77595"
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.633206 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.726486 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5f70393-a9e1-4c5b-8729-81051eb638b0-combined-ca-bundle\") pod \"b5f70393-a9e1-4c5b-8729-81051eb638b0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") "
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.726824 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5f70393-a9e1-4c5b-8729-81051eb638b0-logs\") pod \"b5f70393-a9e1-4c5b-8729-81051eb638b0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") "
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.727271 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5f70393-a9e1-4c5b-8729-81051eb638b0-logs" (OuterVolumeSpecName: "logs") pod "b5f70393-a9e1-4c5b-8729-81051eb638b0" (UID: "b5f70393-a9e1-4c5b-8729-81051eb638b0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.727450 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5f70393-a9e1-4c5b-8729-81051eb638b0-config-data\") pod \"b5f70393-a9e1-4c5b-8729-81051eb638b0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") "
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.727487 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4f4nq\" (UniqueName: \"kubernetes.io/projected/b5f70393-a9e1-4c5b-8729-81051eb638b0-kube-api-access-4f4nq\") pod \"b5f70393-a9e1-4c5b-8729-81051eb638b0\" (UID: \"b5f70393-a9e1-4c5b-8729-81051eb638b0\") "
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.728361 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5f70393-a9e1-4c5b-8729-81051eb638b0-logs\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.747712 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5f70393-a9e1-4c5b-8729-81051eb638b0-kube-api-access-4f4nq" (OuterVolumeSpecName: "kube-api-access-4f4nq") pod "b5f70393-a9e1-4c5b-8729-81051eb638b0" (UID: "b5f70393-a9e1-4c5b-8729-81051eb638b0"). InnerVolumeSpecName "kube-api-access-4f4nq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.755081 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5f70393-a9e1-4c5b-8729-81051eb638b0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5f70393-a9e1-4c5b-8729-81051eb638b0" (UID: "b5f70393-a9e1-4c5b-8729-81051eb638b0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.757289 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5f70393-a9e1-4c5b-8729-81051eb638b0-config-data" (OuterVolumeSpecName: "config-data") pod "b5f70393-a9e1-4c5b-8729-81051eb638b0" (UID: "b5f70393-a9e1-4c5b-8729-81051eb638b0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.830576 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5f70393-a9e1-4c5b-8729-81051eb638b0-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.830606 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5f70393-a9e1-4c5b-8729-81051eb638b0-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.830618 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4f4nq\" (UniqueName: \"kubernetes.io/projected/b5f70393-a9e1-4c5b-8729-81051eb638b0-kube-api-access-4f4nq\") on node \"crc\" DevicePath \"\""
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.983743 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 09:11:34 crc kubenswrapper[4932]: I1125 09:11:34.983811 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.629285 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.665334 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.680533 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.697378 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 25 09:11:35 crc kubenswrapper[4932]: E1125 09:11:35.697744 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5f70393-a9e1-4c5b-8729-81051eb638b0" containerName="nova-api-log"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.697762 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5f70393-a9e1-4c5b-8729-81051eb638b0" containerName="nova-api-log"
Nov 25 09:11:35 crc kubenswrapper[4932]: E1125 09:11:35.697797 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5f70393-a9e1-4c5b-8729-81051eb638b0" containerName="nova-api-api"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.697804 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5f70393-a9e1-4c5b-8729-81051eb638b0" containerName="nova-api-api"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.697965 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5f70393-a9e1-4c5b-8729-81051eb638b0" containerName="nova-api-api"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.697998 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5f70393-a9e1-4c5b-8729-81051eb638b0" containerName="nova-api-log"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.698890 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.701391 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.719226 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.870930 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1b73638-f961-4dda-84e1-6fdab91a5fac-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.871007 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1b73638-f961-4dda-84e1-6fdab91a5fac-logs\") pod \"nova-api-0\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.871342 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjkxj\" (UniqueName: \"kubernetes.io/projected/c1b73638-f961-4dda-84e1-6fdab91a5fac-kube-api-access-pjkxj\") pod \"nova-api-0\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.871571 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1b73638-f961-4dda-84e1-6fdab91a5fac-config-data\") pod \"nova-api-0\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.972817 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1b73638-f961-4dda-84e1-6fdab91a5fac-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.972882 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1b73638-f961-4dda-84e1-6fdab91a5fac-logs\") pod \"nova-api-0\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.972908 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjkxj\" (UniqueName: \"kubernetes.io/projected/c1b73638-f961-4dda-84e1-6fdab91a5fac-kube-api-access-pjkxj\") pod \"nova-api-0\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.972966 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1b73638-f961-4dda-84e1-6fdab91a5fac-config-data\") pod \"nova-api-0\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.973416 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1b73638-f961-4dda-84e1-6fdab91a5fac-logs\") pod \"nova-api-0\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.978239 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1b73638-f961-4dda-84e1-6fdab91a5fac-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.978831 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1b73638-f961-4dda-84e1-6fdab91a5fac-config-data\") pod \"nova-api-0\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " pod="openstack/nova-api-0"
Nov 25 09:11:35 crc kubenswrapper[4932]: I1125 09:11:35.991063 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjkxj\" (UniqueName: \"kubernetes.io/projected/c1b73638-f961-4dda-84e1-6fdab91a5fac-kube-api-access-pjkxj\") pod \"nova-api-0\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " pod="openstack/nova-api-0"
Nov 25 09:11:36 crc kubenswrapper[4932]: I1125 09:11:36.055739 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 09:11:36 crc kubenswrapper[4932]: W1125 09:11:36.535803 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1b73638_f961_4dda_84e1_6fdab91a5fac.slice/crio-6c97d4d28c93ae46d2857429ec33194d809fa35c51500765451f58ce68696e6d WatchSource:0}: Error finding container 6c97d4d28c93ae46d2857429ec33194d809fa35c51500765451f58ce68696e6d: Status 404 returned error can't find the container with id 6c97d4d28c93ae46d2857429ec33194d809fa35c51500765451f58ce68696e6d
Nov 25 09:11:36 crc kubenswrapper[4932]: I1125 09:11:36.538033 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 09:11:36 crc kubenswrapper[4932]: I1125 09:11:36.624943 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5f70393-a9e1-4c5b-8729-81051eb638b0" path="/var/lib/kubelet/pods/b5f70393-a9e1-4c5b-8729-81051eb638b0/volumes"
Nov 25 09:11:36 crc kubenswrapper[4932]: I1125 09:11:36.640696 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c1b73638-f961-4dda-84e1-6fdab91a5fac","Type":"ContainerStarted","Data":"6c97d4d28c93ae46d2857429ec33194d809fa35c51500765451f58ce68696e6d"}
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.131593 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.181423 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.181478 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.181517 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh"
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.182198 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"647e12679adbf63ee5c63458089dff922023eeb7cd99a634cbd8c2a9db9a0cd7"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.182256 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://647e12679adbf63ee5c63458089dff922023eeb7cd99a634cbd8c2a9db9a0cd7" gracePeriod=600
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.249064 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.652289 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="647e12679adbf63ee5c63458089dff922023eeb7cd99a634cbd8c2a9db9a0cd7" exitCode=0
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.652325 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"647e12679adbf63ee5c63458089dff922023eeb7cd99a634cbd8c2a9db9a0cd7"}
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.652656 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"}
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.652675 4932 scope.go:117] "RemoveContainer" containerID="96dca2522fb61785e671095f31f8032a7eb4d218c261ece3b2cefa1b8cd2013b"
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.654682 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c1b73638-f961-4dda-84e1-6fdab91a5fac","Type":"ContainerStarted","Data":"f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1"}
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.654708 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c1b73638-f961-4dda-84e1-6fdab91a5fac","Type":"ContainerStarted","Data":"600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619"}
Nov 25 09:11:37 crc kubenswrapper[4932]: I1125 09:11:37.703819 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.703802705 podStartE2EDuration="2.703802705s" podCreationTimestamp="2025-11-25 09:11:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:11:37.697890261 +0000 UTC m=+1357.823919824" watchObservedRunningTime="2025-11-25 09:11:37.703802705 +0000 UTC m=+1357.829832268"
Nov 25 09:11:39 crc kubenswrapper[4932]: I1125 09:11:39.983992 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 09:11:39 crc kubenswrapper[4932]: I1125 09:11:39.984609 4932
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 09:11:41 crc kubenswrapper[4932]: I1125 09:11:41.000406 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 09:11:41 crc kubenswrapper[4932]: I1125 09:11:41.000452 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 09:11:42 crc kubenswrapper[4932]: I1125 09:11:42.249864 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 09:11:42 crc kubenswrapper[4932]: I1125 09:11:42.285080 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 09:11:42 crc kubenswrapper[4932]: I1125 09:11:42.730120 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 09:11:46 crc kubenswrapper[4932]: I1125 09:11:46.056653 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 09:11:46 crc kubenswrapper[4932]: I1125 09:11:46.057514 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 09:11:47 crc kubenswrapper[4932]: I1125 09:11:47.139401 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c1b73638-f961-4dda-84e1-6fdab91a5fac" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.200:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:11:47 crc kubenswrapper[4932]: I1125 09:11:47.139440 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c1b73638-f961-4dda-84e1-6fdab91a5fac" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.200:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:11:49 crc kubenswrapper[4932]: I1125 09:11:49.834656 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 09:11:49 crc kubenswrapper[4932]: I1125 09:11:49.988448 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 09:11:49 crc kubenswrapper[4932]: I1125 09:11:49.990167 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 09:11:49 crc kubenswrapper[4932]: I1125 09:11:49.994673 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 09:11:50 crc kubenswrapper[4932]: I1125 09:11:50.784009 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 09:11:54 crc kubenswrapper[4932]: I1125 09:11:54.811914 4932 generic.go:334] "Generic (PLEG): container finished" podID="b071159f-2eb5-44b8-90da-1b9605430aea" containerID="22766927fe02c87ef13b604aa23003e23b4c65e52b9aed4158cb19b4dd1cc14c" exitCode=137 Nov 25 09:11:54 crc 
kubenswrapper[4932]: I1125 09:11:54.811968 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b071159f-2eb5-44b8-90da-1b9605430aea","Type":"ContainerDied","Data":"22766927fe02c87ef13b604aa23003e23b4c65e52b9aed4158cb19b4dd1cc14c"} Nov 25 09:11:54 crc kubenswrapper[4932]: I1125 09:11:54.812356 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b071159f-2eb5-44b8-90da-1b9605430aea","Type":"ContainerDied","Data":"0a165f3e5d46d4e3c385987800722a422752ead8836532d64a4186c1c607d2fe"} Nov 25 09:11:54 crc kubenswrapper[4932]: I1125 09:11:54.812377 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a165f3e5d46d4e3c385987800722a422752ead8836532d64a4186c1c607d2fe" Nov 25 09:11:54 crc kubenswrapper[4932]: I1125 09:11:54.876655 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.053042 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b071159f-2eb5-44b8-90da-1b9605430aea-combined-ca-bundle\") pod \"b071159f-2eb5-44b8-90da-1b9605430aea\" (UID: \"b071159f-2eb5-44b8-90da-1b9605430aea\") " Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.054290 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqx6c\" (UniqueName: \"kubernetes.io/projected/b071159f-2eb5-44b8-90da-1b9605430aea-kube-api-access-rqx6c\") pod \"b071159f-2eb5-44b8-90da-1b9605430aea\" (UID: \"b071159f-2eb5-44b8-90da-1b9605430aea\") " Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.054336 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b071159f-2eb5-44b8-90da-1b9605430aea-config-data\") pod \"b071159f-2eb5-44b8-90da-1b9605430aea\" (UID: \"b071159f-2eb5-44b8-90da-1b9605430aea\") " Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.059389 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b071159f-2eb5-44b8-90da-1b9605430aea-kube-api-access-rqx6c" (OuterVolumeSpecName: "kube-api-access-rqx6c") pod "b071159f-2eb5-44b8-90da-1b9605430aea" (UID: "b071159f-2eb5-44b8-90da-1b9605430aea"). InnerVolumeSpecName "kube-api-access-rqx6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.081068 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b071159f-2eb5-44b8-90da-1b9605430aea-config-data" (OuterVolumeSpecName: "config-data") pod "b071159f-2eb5-44b8-90da-1b9605430aea" (UID: "b071159f-2eb5-44b8-90da-1b9605430aea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.091131 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b071159f-2eb5-44b8-90da-1b9605430aea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b071159f-2eb5-44b8-90da-1b9605430aea" (UID: "b071159f-2eb5-44b8-90da-1b9605430aea"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.156423 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b071159f-2eb5-44b8-90da-1b9605430aea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.156675 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqx6c\" (UniqueName: \"kubernetes.io/projected/b071159f-2eb5-44b8-90da-1b9605430aea-kube-api-access-rqx6c\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.156746 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b071159f-2eb5-44b8-90da-1b9605430aea-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.821417 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.864335 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.885180 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.896138 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:11:55 crc kubenswrapper[4932]: E1125 09:11:55.896538 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b071159f-2eb5-44b8-90da-1b9605430aea" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.896557 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b071159f-2eb5-44b8-90da-1b9605430aea" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.896772 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b071159f-2eb5-44b8-90da-1b9605430aea" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.897355 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.901667 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.901775 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.901903 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 09:11:55 crc kubenswrapper[4932]: I1125 09:11:55.905246 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.060518 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.060593 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.061043 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.061088 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.064222 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.065351 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.071216 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.071257 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.071327 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.071616 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.071947 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjqk7\" (UniqueName: 
\"kubernetes.io/projected/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-kube-api-access-hjqk7\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.173759 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.173853 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.173927 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.174033 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.174156 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjqk7\" (UniqueName: \"kubernetes.io/projected/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-kube-api-access-hjqk7\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.193598 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.194877 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.197657 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.202651 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" 
(UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.217106 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjqk7\" (UniqueName: \"kubernetes.io/projected/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-kube-api-access-hjqk7\") pod \"nova-cell1-novncproxy-0\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.228926 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.347859 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-qbfss"] Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.356859 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.373763 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-qbfss"] Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.484967 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5x5hp\" (UniqueName: \"kubernetes.io/projected/a9855d3c-818d-4804-add2-d6b0fce52613-kube-api-access-5x5hp\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.485769 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-dns-swift-storage-0\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.485874 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-dns-svc\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.487099 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-ovsdbserver-nb\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.487172 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-ovsdbserver-sb\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.487316 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-config\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: 
\"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.590008 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5x5hp\" (UniqueName: \"kubernetes.io/projected/a9855d3c-818d-4804-add2-d6b0fce52613-kube-api-access-5x5hp\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.590122 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-dns-swift-storage-0\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.590164 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-dns-svc\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.590253 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-ovsdbserver-nb\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.590276 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-ovsdbserver-sb\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.590335 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-config\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.592571 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-ovsdbserver-nb\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.592735 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-dns-swift-storage-0\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.593179 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-ovsdbserver-sb\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " 
pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.594055 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-dns-svc\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.594174 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-config\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.617281 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5x5hp\" (UniqueName: \"kubernetes.io/projected/a9855d3c-818d-4804-add2-d6b0fce52613-kube-api-access-5x5hp\") pod \"dnsmasq-dns-55bfb77665-qbfss\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.624949 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b071159f-2eb5-44b8-90da-1b9605430aea" path="/var/lib/kubelet/pods/b071159f-2eb5-44b8-90da-1b9605430aea/volumes" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.748058 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:56 crc kubenswrapper[4932]: I1125 09:11:56.851500 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:11:57 crc kubenswrapper[4932]: I1125 09:11:57.251365 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-qbfss"] Nov 25 09:11:57 crc kubenswrapper[4932]: I1125 09:11:57.840564 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f","Type":"ContainerStarted","Data":"b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47"} Nov 25 09:11:57 crc kubenswrapper[4932]: I1125 09:11:57.840867 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f","Type":"ContainerStarted","Data":"ecb948a960cfa3f7ab50a760d6fb2b5cd5651f5b1ba1274dae9c83117830bd7d"} Nov 25 09:11:57 crc kubenswrapper[4932]: I1125 09:11:57.842178 4932 generic.go:334] "Generic (PLEG): container finished" podID="a9855d3c-818d-4804-add2-d6b0fce52613" containerID="31f10fa596503c558e74a122048e857cd990eaba6b0da87eb56ec3d77736763e" exitCode=0 Nov 25 09:11:57 crc kubenswrapper[4932]: I1125 09:11:57.843313 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" event={"ID":"a9855d3c-818d-4804-add2-d6b0fce52613","Type":"ContainerDied","Data":"31f10fa596503c558e74a122048e857cd990eaba6b0da87eb56ec3d77736763e"} Nov 25 09:11:57 crc kubenswrapper[4932]: I1125 09:11:57.843345 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" event={"ID":"a9855d3c-818d-4804-add2-d6b0fce52613","Type":"ContainerStarted","Data":"5a9d32ed53090394bbfdda6cf63805ca96774969fb605b5e3f2d6659302d976f"} Nov 25 09:11:57 crc kubenswrapper[4932]: I1125 09:11:57.870396 4932 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.870378176 podStartE2EDuration="2.870378176s" podCreationTimestamp="2025-11-25 09:11:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:11:57.868177938 +0000 UTC m=+1377.994207501" watchObservedRunningTime="2025-11-25 09:11:57.870378176 +0000 UTC m=+1377.996407739" Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.390335 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.390859 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="ceilometer-central-agent" containerID="cri-o://2e60a47f179a4f97dac0a9146bee6132afeceb8a758026ac1b3bb79b4fc2bbff" gracePeriod=30 Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.390920 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="sg-core" containerID="cri-o://d27e09fff1e0ff3c7fb6502dffa6adf6f46a0fb70748ecf30e635dfb400d5c61" gracePeriod=30 Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.390943 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="ceilometer-notification-agent" containerID="cri-o://2206e31168db9bc10c8df760e4ae067016aaf211011534726aab5d8d2c224708" gracePeriod=30 Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.390914 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="proxy-httpd" containerID="cri-o://25d801f8101fc6eb6c2551500a95854455471e5066d6bc9c4d2efbdd92226c45" gracePeriod=30 Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.854126 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" event={"ID":"a9855d3c-818d-4804-add2-d6b0fce52613","Type":"ContainerStarted","Data":"b8a9c7b4d8aee9148d3f80ec0b3f039c905ef40f6d4c9c1480a1255ba2197d40"} Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.854319 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.857050 4932 generic.go:334] "Generic (PLEG): container finished" podID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerID="25d801f8101fc6eb6c2551500a95854455471e5066d6bc9c4d2efbdd92226c45" exitCode=0 Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.857113 4932 generic.go:334] "Generic (PLEG): container finished" podID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerID="d27e09fff1e0ff3c7fb6502dffa6adf6f46a0fb70748ecf30e635dfb400d5c61" exitCode=2 Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.857127 4932 generic.go:334] "Generic (PLEG): container finished" podID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerID="2e60a47f179a4f97dac0a9146bee6132afeceb8a758026ac1b3bb79b4fc2bbff" exitCode=0 Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.858005 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e38a2272-cb35-490e-8ea3-672050e88c8a","Type":"ContainerDied","Data":"25d801f8101fc6eb6c2551500a95854455471e5066d6bc9c4d2efbdd92226c45"} Nov 
25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.858048 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e38a2272-cb35-490e-8ea3-672050e88c8a","Type":"ContainerDied","Data":"d27e09fff1e0ff3c7fb6502dffa6adf6f46a0fb70748ecf30e635dfb400d5c61"} Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.858061 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e38a2272-cb35-490e-8ea3-672050e88c8a","Type":"ContainerDied","Data":"2e60a47f179a4f97dac0a9146bee6132afeceb8a758026ac1b3bb79b4fc2bbff"} Nov 25 09:11:58 crc kubenswrapper[4932]: I1125 09:11:58.874940 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" podStartSLOduration=2.874920105 podStartE2EDuration="2.874920105s" podCreationTimestamp="2025-11-25 09:11:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:11:58.871575969 +0000 UTC m=+1378.997605542" watchObservedRunningTime="2025-11-25 09:11:58.874920105 +0000 UTC m=+1379.000949688" Nov 25 09:11:59 crc kubenswrapper[4932]: I1125 09:11:59.010079 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:11:59 crc kubenswrapper[4932]: I1125 09:11:59.010338 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c1b73638-f961-4dda-84e1-6fdab91a5fac" containerName="nova-api-log" containerID="cri-o://600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619" gracePeriod=30 Nov 25 09:11:59 crc kubenswrapper[4932]: I1125 09:11:59.010388 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c1b73638-f961-4dda-84e1-6fdab91a5fac" containerName="nova-api-api" containerID="cri-o://f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1" gracePeriod=30 Nov 25 09:11:59 crc kubenswrapper[4932]: I1125 09:11:59.867348 4932 generic.go:334] "Generic (PLEG): container finished" podID="c1b73638-f961-4dda-84e1-6fdab91a5fac" containerID="600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619" exitCode=143 Nov 25 09:11:59 crc kubenswrapper[4932]: I1125 09:11:59.867436 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c1b73638-f961-4dda-84e1-6fdab91a5fac","Type":"ContainerDied","Data":"600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619"} Nov 25 09:12:01 crc kubenswrapper[4932]: I1125 09:12:01.230688 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.762407 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.899276 4932 generic.go:334] "Generic (PLEG): container finished" podID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerID="2206e31168db9bc10c8df760e4ae067016aaf211011534726aab5d8d2c224708" exitCode=0 Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.899374 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e38a2272-cb35-490e-8ea3-672050e88c8a","Type":"ContainerDied","Data":"2206e31168db9bc10c8df760e4ae067016aaf211011534726aab5d8d2c224708"} Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.902267 4932 generic.go:334] "Generic (PLEG): container finished" podID="c1b73638-f961-4dda-84e1-6fdab91a5fac" containerID="f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1" exitCode=0 Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.902305 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c1b73638-f961-4dda-84e1-6fdab91a5fac","Type":"ContainerDied","Data":"f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1"} Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.902338 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c1b73638-f961-4dda-84e1-6fdab91a5fac","Type":"ContainerDied","Data":"6c97d4d28c93ae46d2857429ec33194d809fa35c51500765451f58ce68696e6d"} Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.902359 4932 scope.go:117] "RemoveContainer" containerID="f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1" Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.902392 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.907640 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjkxj\" (UniqueName: \"kubernetes.io/projected/c1b73638-f961-4dda-84e1-6fdab91a5fac-kube-api-access-pjkxj\") pod \"c1b73638-f961-4dda-84e1-6fdab91a5fac\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.907749 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1b73638-f961-4dda-84e1-6fdab91a5fac-combined-ca-bundle\") pod \"c1b73638-f961-4dda-84e1-6fdab91a5fac\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.907910 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1b73638-f961-4dda-84e1-6fdab91a5fac-logs\") pod \"c1b73638-f961-4dda-84e1-6fdab91a5fac\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.907960 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1b73638-f961-4dda-84e1-6fdab91a5fac-config-data\") pod \"c1b73638-f961-4dda-84e1-6fdab91a5fac\" (UID: \"c1b73638-f961-4dda-84e1-6fdab91a5fac\") " Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.909180 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1b73638-f961-4dda-84e1-6fdab91a5fac-logs" (OuterVolumeSpecName: "logs") pod "c1b73638-f961-4dda-84e1-6fdab91a5fac" (UID: "c1b73638-f961-4dda-84e1-6fdab91a5fac"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.910977 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1b73638-f961-4dda-84e1-6fdab91a5fac-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.924446 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1b73638-f961-4dda-84e1-6fdab91a5fac-kube-api-access-pjkxj" (OuterVolumeSpecName: "kube-api-access-pjkxj") pod "c1b73638-f961-4dda-84e1-6fdab91a5fac" (UID: "c1b73638-f961-4dda-84e1-6fdab91a5fac"). InnerVolumeSpecName "kube-api-access-pjkxj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.949783 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1b73638-f961-4dda-84e1-6fdab91a5fac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1b73638-f961-4dda-84e1-6fdab91a5fac" (UID: "c1b73638-f961-4dda-84e1-6fdab91a5fac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.961146 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.964262 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1b73638-f961-4dda-84e1-6fdab91a5fac-config-data" (OuterVolumeSpecName: "config-data") pod "c1b73638-f961-4dda-84e1-6fdab91a5fac" (UID: "c1b73638-f961-4dda-84e1-6fdab91a5fac"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:02 crc kubenswrapper[4932]: I1125 09:12:02.973666 4932 scope.go:117] "RemoveContainer" containerID="600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.012164 4932 scope.go:117] "RemoveContainer" containerID="f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1" Nov 25 09:12:03 crc kubenswrapper[4932]: E1125 09:12:03.013314 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1\": container with ID starting with f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1 not found: ID does not exist" containerID="f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.013355 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1"} err="failed to get container status \"f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1\": rpc error: code = NotFound desc = could not find container \"f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1\": container with ID starting with f1f8454b2c79dd91113a90f2f2192e4b9fcccba3bce218627876d23cc741a6d1 not found: ID does not exist" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.013381 4932 scope.go:117] "RemoveContainer" containerID="600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.013575 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjkxj\" (UniqueName: \"kubernetes.io/projected/c1b73638-f961-4dda-84e1-6fdab91a5fac-kube-api-access-pjkxj\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.013610 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1b73638-f961-4dda-84e1-6fdab91a5fac-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.013629 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1b73638-f961-4dda-84e1-6fdab91a5fac-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:03 crc kubenswrapper[4932]: E1125 09:12:03.014432 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619\": container with ID starting with 600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619 not found: ID does not exist" containerID="600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.014468 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619"} err="failed to get container status \"600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619\": rpc error: code = NotFound desc = could not find container \"600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619\": container with ID starting with 600863da156dd58422094e553ddc4e045435f42f7457ce62dc27e529141a5619 not found: ID does not exist" Nov 25 09:12:03 crc 
kubenswrapper[4932]: I1125 09:12:03.114808 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-scripts\") pod \"e38a2272-cb35-490e-8ea3-672050e88c8a\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.114915 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvx8l\" (UniqueName: \"kubernetes.io/projected/e38a2272-cb35-490e-8ea3-672050e88c8a-kube-api-access-zvx8l\") pod \"e38a2272-cb35-490e-8ea3-672050e88c8a\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.114951 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-config-data\") pod \"e38a2272-cb35-490e-8ea3-672050e88c8a\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.115005 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-sg-core-conf-yaml\") pod \"e38a2272-cb35-490e-8ea3-672050e88c8a\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.115078 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-combined-ca-bundle\") pod \"e38a2272-cb35-490e-8ea3-672050e88c8a\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.115122 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e38a2272-cb35-490e-8ea3-672050e88c8a-run-httpd\") pod \"e38a2272-cb35-490e-8ea3-672050e88c8a\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.115214 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-ceilometer-tls-certs\") pod \"e38a2272-cb35-490e-8ea3-672050e88c8a\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.115295 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e38a2272-cb35-490e-8ea3-672050e88c8a-log-httpd\") pod \"e38a2272-cb35-490e-8ea3-672050e88c8a\" (UID: \"e38a2272-cb35-490e-8ea3-672050e88c8a\") " Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.115964 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e38a2272-cb35-490e-8ea3-672050e88c8a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e38a2272-cb35-490e-8ea3-672050e88c8a" (UID: "e38a2272-cb35-490e-8ea3-672050e88c8a"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.116372 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e38a2272-cb35-490e-8ea3-672050e88c8a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e38a2272-cb35-490e-8ea3-672050e88c8a" (UID: "e38a2272-cb35-490e-8ea3-672050e88c8a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.117942 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-scripts" (OuterVolumeSpecName: "scripts") pod "e38a2272-cb35-490e-8ea3-672050e88c8a" (UID: "e38a2272-cb35-490e-8ea3-672050e88c8a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.124182 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e38a2272-cb35-490e-8ea3-672050e88c8a-kube-api-access-zvx8l" (OuterVolumeSpecName: "kube-api-access-zvx8l") pod "e38a2272-cb35-490e-8ea3-672050e88c8a" (UID: "e38a2272-cb35-490e-8ea3-672050e88c8a"). InnerVolumeSpecName "kube-api-access-zvx8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.142288 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e38a2272-cb35-490e-8ea3-672050e88c8a" (UID: "e38a2272-cb35-490e-8ea3-672050e88c8a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.174471 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "e38a2272-cb35-490e-8ea3-672050e88c8a" (UID: "e38a2272-cb35-490e-8ea3-672050e88c8a"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.209365 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e38a2272-cb35-490e-8ea3-672050e88c8a" (UID: "e38a2272-cb35-490e-8ea3-672050e88c8a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.212365 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-config-data" (OuterVolumeSpecName: "config-data") pod "e38a2272-cb35-490e-8ea3-672050e88c8a" (UID: "e38a2272-cb35-490e-8ea3-672050e88c8a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.220725 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvx8l\" (UniqueName: \"kubernetes.io/projected/e38a2272-cb35-490e-8ea3-672050e88c8a-kube-api-access-zvx8l\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.220761 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.220776 4932 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.220788 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.220801 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e38a2272-cb35-490e-8ea3-672050e88c8a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.220812 4932 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.220823 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e38a2272-cb35-490e-8ea3-672050e88c8a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.220834 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e38a2272-cb35-490e-8ea3-672050e88c8a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.252989 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.277687 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.287645 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 09:12:03 crc kubenswrapper[4932]: E1125 09:12:03.288118 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1b73638-f961-4dda-84e1-6fdab91a5fac" containerName="nova-api-api" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.288144 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1b73638-f961-4dda-84e1-6fdab91a5fac" containerName="nova-api-api" Nov 25 09:12:03 crc kubenswrapper[4932]: E1125 09:12:03.288179 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="ceilometer-central-agent" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.288207 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="ceilometer-central-agent" Nov 25 09:12:03 crc kubenswrapper[4932]: E1125 09:12:03.288232 4932 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="sg-core" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.288243 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="sg-core" Nov 25 09:12:03 crc kubenswrapper[4932]: E1125 09:12:03.288259 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1b73638-f961-4dda-84e1-6fdab91a5fac" containerName="nova-api-log" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.288266 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1b73638-f961-4dda-84e1-6fdab91a5fac" containerName="nova-api-log" Nov 25 09:12:03 crc kubenswrapper[4932]: E1125 09:12:03.288288 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="proxy-httpd" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.288295 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="proxy-httpd" Nov 25 09:12:03 crc kubenswrapper[4932]: E1125 09:12:03.288311 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="ceilometer-notification-agent" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.288319 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="ceilometer-notification-agent" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.288531 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1b73638-f961-4dda-84e1-6fdab91a5fac" containerName="nova-api-log" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.288542 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1b73638-f961-4dda-84e1-6fdab91a5fac" containerName="nova-api-api" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.288568 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="ceilometer-notification-agent" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.288582 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="proxy-httpd" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.288648 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="ceilometer-central-agent" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.288661 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" containerName="sg-core" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.289938 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.292353 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.292940 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.295566 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.298123 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.424233 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.424296 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-public-tls-certs\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.424324 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2mlz\" (UniqueName: \"kubernetes.io/projected/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-kube-api-access-v2mlz\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.424350 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-logs\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.424600 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.424686 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-config-data\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.527129 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.527259 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-config-data\") pod \"nova-api-0\" (UID: 
\"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.527430 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.527493 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-public-tls-certs\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.527533 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2mlz\" (UniqueName: \"kubernetes.io/projected/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-kube-api-access-v2mlz\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.527578 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-logs\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.528112 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-logs\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.530994 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-config-data\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.531671 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.531782 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.533790 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-public-tls-certs\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.545723 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2mlz\" (UniqueName: \"kubernetes.io/projected/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-kube-api-access-v2mlz\") pod \"nova-api-0\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") " pod="openstack/nova-api-0" Nov 
Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.623244 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.913823 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e38a2272-cb35-490e-8ea3-672050e88c8a","Type":"ContainerDied","Data":"f44482c0d2922b77fefb1aab38290fb92fb7af2f579f34ffa6deba43de5e2e8c"}
Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.914153 4932 scope.go:117] "RemoveContainer" containerID="25d801f8101fc6eb6c2551500a95854455471e5066d6bc9c4d2efbdd92226c45"
Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.913901 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.947053 4932 scope.go:117] "RemoveContainer" containerID="d27e09fff1e0ff3c7fb6502dffa6adf6f46a0fb70748ecf30e635dfb400d5c61"
Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.958874 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.969893 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.976481 4932 scope.go:117] "RemoveContainer" containerID="2206e31168db9bc10c8df760e4ae067016aaf211011534726aab5d8d2c224708"
Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.996242 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 09:12:03 crc kubenswrapper[4932]: I1125 09:12:03.999250 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.002417 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.003484 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.003710 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.024241 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.047100 4932 scope.go:117] "RemoveContainer" containerID="2e60a47f179a4f97dac0a9146bee6132afeceb8a758026ac1b3bb79b4fc2bbff"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.091248 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.138638 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.138700 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-scripts\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.138729 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2v88\" (UniqueName: \"kubernetes.io/projected/90db5718-c185-4863-888a-6cb41ca5339d-kube-api-access-g2v88\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.138747 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.138778 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/90db5718-c185-4863-888a-6cb41ca5339d-log-httpd\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.138827 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/90db5718-c185-4863-888a-6cb41ca5339d-run-httpd\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.138844 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.138861 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-config-data\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.240495 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/90db5718-c185-4863-888a-6cb41ca5339d-log-httpd\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.240577 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/90db5718-c185-4863-888a-6cb41ca5339d-run-httpd\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.240597 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.240619 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-config-data\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.240711 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.240763 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-scripts\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.240792 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2v88\" (UniqueName: \"kubernetes.io/projected/90db5718-c185-4863-888a-6cb41ca5339d-kube-api-access-g2v88\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.240815 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.242105 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/90db5718-c185-4863-888a-6cb41ca5339d-log-httpd\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.242160 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/90db5718-c185-4863-888a-6cb41ca5339d-run-httpd\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.247463 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-scripts\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.247819 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-config-data\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.248361 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.249593 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0"
pod="openstack/ceilometer-0" Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.249944 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0" Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.258961 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2v88\" (UniqueName: \"kubernetes.io/projected/90db5718-c185-4863-888a-6cb41ca5339d-kube-api-access-g2v88\") pod \"ceilometer-0\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " pod="openstack/ceilometer-0" Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.419838 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.619236 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1b73638-f961-4dda-84e1-6fdab91a5fac" path="/var/lib/kubelet/pods/c1b73638-f961-4dda-84e1-6fdab91a5fac/volumes" Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.620290 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e38a2272-cb35-490e-8ea3-672050e88c8a" path="/var/lib/kubelet/pods/e38a2272-cb35-490e-8ea3-672050e88c8a/volumes" Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.912071 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.923898 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"90db5718-c185-4863-888a-6cb41ca5339d","Type":"ContainerStarted","Data":"9b3bbf21cc0cd1b693451b19c7b9b415c76cfc7281f20ceff7143d8d1ea4325a"} Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.925693 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c4eee0a5-1322-498d-8a4d-f6cb76cafacf","Type":"ContainerStarted","Data":"21a5d0df1942575438c4ffeb87b83b3214bff47585c2d3fb6e8ea21f3a0abea3"} Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.925717 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c4eee0a5-1322-498d-8a4d-f6cb76cafacf","Type":"ContainerStarted","Data":"5a9af99fec1a9e336010120706f5205cb52b0a56fb0e496623d4cdfc6e5717cd"} Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.925727 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c4eee0a5-1322-498d-8a4d-f6cb76cafacf","Type":"ContainerStarted","Data":"e4beb8b986568b5b97769224499ae0574f2b83556bc8f55fe93c220cbdb2e599"} Nov 25 09:12:04 crc kubenswrapper[4932]: I1125 09:12:04.956082 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=1.9560568809999999 podStartE2EDuration="1.956056881s" podCreationTimestamp="2025-11-25 09:12:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:12:04.946617101 +0000 UTC m=+1385.072646684" watchObservedRunningTime="2025-11-25 09:12:04.956056881 +0000 UTC m=+1385.082086444" Nov 25 09:12:05 crc kubenswrapper[4932]: I1125 09:12:05.938387 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"90db5718-c185-4863-888a-6cb41ca5339d","Type":"ContainerStarted","Data":"d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4"} Nov 25 09:12:06 crc kubenswrapper[4932]: I1125 09:12:06.229945 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:12:06 crc kubenswrapper[4932]: I1125 09:12:06.261355 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:12:06 crc kubenswrapper[4932]: I1125 09:12:06.750379 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:12:06 crc kubenswrapper[4932]: I1125 09:12:06.812613 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-bw9mt"] Nov 25 09:12:06 crc kubenswrapper[4932]: I1125 09:12:06.812900 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" podUID="da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" containerName="dnsmasq-dns" containerID="cri-o://71fd789f350620b59d2210426d201b277c0e3bcf014c5657123fd95a5d0459d1" gracePeriod=10 Nov 25 09:12:06 crc kubenswrapper[4932]: I1125 09:12:06.953675 4932 generic.go:334] "Generic (PLEG): container finished" podID="da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" containerID="71fd789f350620b59d2210426d201b277c0e3bcf014c5657123fd95a5d0459d1" exitCode=0 Nov 25 09:12:06 crc kubenswrapper[4932]: I1125 09:12:06.953743 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" event={"ID":"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8","Type":"ContainerDied","Data":"71fd789f350620b59d2210426d201b277c0e3bcf014c5657123fd95a5d0459d1"} Nov 25 09:12:06 crc kubenswrapper[4932]: I1125 09:12:06.957442 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"90db5718-c185-4863-888a-6cb41ca5339d","Type":"ContainerStarted","Data":"5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028"} Nov 25 09:12:06 crc kubenswrapper[4932]: I1125 09:12:06.957474 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"90db5718-c185-4863-888a-6cb41ca5339d","Type":"ContainerStarted","Data":"b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2"} Nov 25 09:12:06 crc kubenswrapper[4932]: I1125 09:12:06.979228 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.294367 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-bm59q"] Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.295563 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.297132 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.297610 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.306260 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-bm59q"] Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.358539 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.385507 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-ovsdbserver-nb\") pod \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.385582 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-dns-swift-storage-0\") pod \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.385662 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-config\") pod \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.385709 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5b7s\" (UniqueName: \"kubernetes.io/projected/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-kube-api-access-v5b7s\") pod \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.385765 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-dns-svc\") pod \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.385882 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-ovsdbserver-sb\") pod \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\" (UID: \"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8\") " Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.386309 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-config-data\") pod \"nova-cell1-cell-mapping-bm59q\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.386387 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-scripts\") pod \"nova-cell1-cell-mapping-bm59q\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.386439 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwxs5\" (UniqueName: \"kubernetes.io/projected/e0fb7667-bb72-4856-9492-7c0f783f3a7f-kube-api-access-zwxs5\") pod \"nova-cell1-cell-mapping-bm59q\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.386471 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-bm59q\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.397748 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-kube-api-access-v5b7s" (OuterVolumeSpecName: "kube-api-access-v5b7s") pod "da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" (UID: "da378a29-86a1-4b28-ba5e-37ec4f3e3fc8"). InnerVolumeSpecName "kube-api-access-v5b7s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.437401 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" (UID: "da378a29-86a1-4b28-ba5e-37ec4f3e3fc8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.453998 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" (UID: "da378a29-86a1-4b28-ba5e-37ec4f3e3fc8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.463892 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" (UID: "da378a29-86a1-4b28-ba5e-37ec4f3e3fc8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.469144 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" (UID: "da378a29-86a1-4b28-ba5e-37ec4f3e3fc8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.475560 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-config" (OuterVolumeSpecName: "config") pod "da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" (UID: "da378a29-86a1-4b28-ba5e-37ec4f3e3fc8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.488902 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-config-data\") pod \"nova-cell1-cell-mapping-bm59q\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.488976 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-scripts\") pod \"nova-cell1-cell-mapping-bm59q\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.489018 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwxs5\" (UniqueName: \"kubernetes.io/projected/e0fb7667-bb72-4856-9492-7c0f783f3a7f-kube-api-access-zwxs5\") pod \"nova-cell1-cell-mapping-bm59q\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.489041 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-bm59q\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.489134 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.489145 4932 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.489155 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.489165 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5b7s\" (UniqueName: \"kubernetes.io/projected/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-kube-api-access-v5b7s\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.489174 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.489201 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.493249 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-bm59q\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") 
" pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.493989 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-scripts\") pod \"nova-cell1-cell-mapping-bm59q\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.495082 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-config-data\") pod \"nova-cell1-cell-mapping-bm59q\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.507822 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwxs5\" (UniqueName: \"kubernetes.io/projected/e0fb7667-bb72-4856-9492-7c0f783f3a7f-kube-api-access-zwxs5\") pod \"nova-cell1-cell-mapping-bm59q\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:07 crc kubenswrapper[4932]: I1125 09:12:07.674695 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:08 crc kubenswrapper[4932]: I1125 09:12:07.968527 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" event={"ID":"da378a29-86a1-4b28-ba5e-37ec4f3e3fc8","Type":"ContainerDied","Data":"118a8255d787fb7000280b5fd9f6a092c9aac11b18f00f35e1946b7c163836ac"} Nov 25 09:12:08 crc kubenswrapper[4932]: I1125 09:12:07.968597 4932 scope.go:117] "RemoveContainer" containerID="71fd789f350620b59d2210426d201b277c0e3bcf014c5657123fd95a5d0459d1" Nov 25 09:12:08 crc kubenswrapper[4932]: I1125 09:12:07.968611 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" Nov 25 09:12:08 crc kubenswrapper[4932]: I1125 09:12:08.023367 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-bw9mt"] Nov 25 09:12:08 crc kubenswrapper[4932]: I1125 09:12:08.023541 4932 scope.go:117] "RemoveContainer" containerID="f4a7675ac305600c60ae126df1a98bd624ca63d6310584a2191702cc5a4f0878" Nov 25 09:12:08 crc kubenswrapper[4932]: I1125 09:12:08.038039 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-bw9mt"] Nov 25 09:12:08 crc kubenswrapper[4932]: I1125 09:12:08.130025 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-bm59q"] Nov 25 09:12:08 crc kubenswrapper[4932]: W1125 09:12:08.130138 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode0fb7667_bb72_4856_9492_7c0f783f3a7f.slice/crio-c1cb5a7d5318f3fd75f77768f76cda039a94c4f3f182a22d321298d799566499 WatchSource:0}: Error finding container c1cb5a7d5318f3fd75f77768f76cda039a94c4f3f182a22d321298d799566499: Status 404 returned error can't find the container with id c1cb5a7d5318f3fd75f77768f76cda039a94c4f3f182a22d321298d799566499 Nov 25 09:12:08 crc kubenswrapper[4932]: I1125 09:12:08.628888 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" path="/var/lib/kubelet/pods/da378a29-86a1-4b28-ba5e-37ec4f3e3fc8/volumes" Nov 25 09:12:08 crc kubenswrapper[4932]: I1125 09:12:08.982724 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-bm59q" event={"ID":"e0fb7667-bb72-4856-9492-7c0f783f3a7f","Type":"ContainerStarted","Data":"9be820186e32cbebad327f145eb2403d74b609391365b492967c9a6948bbcbe5"} Nov 25 09:12:08 crc kubenswrapper[4932]: I1125 09:12:08.982777 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-bm59q" event={"ID":"e0fb7667-bb72-4856-9492-7c0f783f3a7f","Type":"ContainerStarted","Data":"c1cb5a7d5318f3fd75f77768f76cda039a94c4f3f182a22d321298d799566499"} Nov 25 09:12:09 crc kubenswrapper[4932]: I1125 09:12:09.015095 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-bm59q" podStartSLOduration=2.015072928 podStartE2EDuration="2.015072928s" podCreationTimestamp="2025-11-25 09:12:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:12:09.004901436 +0000 UTC m=+1389.130931009" watchObservedRunningTime="2025-11-25 09:12:09.015072928 +0000 UTC m=+1389.141102501" Nov 25 09:12:10 crc kubenswrapper[4932]: I1125 09:12:10.003542 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"90db5718-c185-4863-888a-6cb41ca5339d","Type":"ContainerStarted","Data":"44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d"} Nov 25 09:12:10 crc kubenswrapper[4932]: I1125 09:12:10.003902 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 09:12:10 crc kubenswrapper[4932]: I1125 09:12:10.047601 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.988024302 podStartE2EDuration="7.047583164s" podCreationTimestamp="2025-11-25 09:12:03 +0000 UTC" firstStartedPulling="2025-11-25 09:12:04.915286472 +0000 UTC 
m=+1385.041316035" lastFinishedPulling="2025-11-25 09:12:08.974845334 +0000 UTC m=+1389.100874897" observedRunningTime="2025-11-25 09:12:10.036886387 +0000 UTC m=+1390.162915950" watchObservedRunningTime="2025-11-25 09:12:10.047583164 +0000 UTC m=+1390.173612727" Nov 25 09:12:12 crc kubenswrapper[4932]: I1125 09:12:12.322335 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-64dbf5859c-bw9mt" podUID="da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.193:5353: i/o timeout" Nov 25 09:12:13 crc kubenswrapper[4932]: I1125 09:12:13.040496 4932 generic.go:334] "Generic (PLEG): container finished" podID="e0fb7667-bb72-4856-9492-7c0f783f3a7f" containerID="9be820186e32cbebad327f145eb2403d74b609391365b492967c9a6948bbcbe5" exitCode=0 Nov 25 09:12:13 crc kubenswrapper[4932]: I1125 09:12:13.040603 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-bm59q" event={"ID":"e0fb7667-bb72-4856-9492-7c0f783f3a7f","Type":"ContainerDied","Data":"9be820186e32cbebad327f145eb2403d74b609391365b492967c9a6948bbcbe5"} Nov 25 09:12:13 crc kubenswrapper[4932]: I1125 09:12:13.623844 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 09:12:13 crc kubenswrapper[4932]: I1125 09:12:13.623965 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.477755 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.540054 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwxs5\" (UniqueName: \"kubernetes.io/projected/e0fb7667-bb72-4856-9492-7c0f783f3a7f-kube-api-access-zwxs5\") pod \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.540116 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-scripts\") pod \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.540228 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-config-data\") pod \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.540259 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-combined-ca-bundle\") pod \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\" (UID: \"e0fb7667-bb72-4856-9492-7c0f783f3a7f\") " Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.549641 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0fb7667-bb72-4856-9492-7c0f783f3a7f-kube-api-access-zwxs5" (OuterVolumeSpecName: "kube-api-access-zwxs5") pod "e0fb7667-bb72-4856-9492-7c0f783f3a7f" (UID: "e0fb7667-bb72-4856-9492-7c0f783f3a7f"). InnerVolumeSpecName "kube-api-access-zwxs5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.550158 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-scripts" (OuterVolumeSpecName: "scripts") pod "e0fb7667-bb72-4856-9492-7c0f783f3a7f" (UID: "e0fb7667-bb72-4856-9492-7c0f783f3a7f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.581880 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-config-data" (OuterVolumeSpecName: "config-data") pod "e0fb7667-bb72-4856-9492-7c0f783f3a7f" (UID: "e0fb7667-bb72-4856-9492-7c0f783f3a7f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.593284 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e0fb7667-bb72-4856-9492-7c0f783f3a7f" (UID: "e0fb7667-bb72-4856-9492-7c0f783f3a7f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.641519 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.641528 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.642756 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.642783 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.642808 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwxs5\" (UniqueName: \"kubernetes.io/projected/e0fb7667-bb72-4856-9492-7c0f783f3a7f-kube-api-access-zwxs5\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:14 crc kubenswrapper[4932]: I1125 09:12:14.642829 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0fb7667-bb72-4856-9492-7c0f783f3a7f-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:15 crc kubenswrapper[4932]: I1125 09:12:15.068901 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-bm59q" event={"ID":"e0fb7667-bb72-4856-9492-7c0f783f3a7f","Type":"ContainerDied","Data":"c1cb5a7d5318f3fd75f77768f76cda039a94c4f3f182a22d321298d799566499"} Nov 25 09:12:15 crc kubenswrapper[4932]: I1125 09:12:15.069640 4932 pod_container_deletor.go:80] 
"Container not found in pod's containers" containerID="c1cb5a7d5318f3fd75f77768f76cda039a94c4f3f182a22d321298d799566499" Nov 25 09:12:15 crc kubenswrapper[4932]: I1125 09:12:15.068995 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-bm59q" Nov 25 09:12:15 crc kubenswrapper[4932]: I1125 09:12:15.267745 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:12:15 crc kubenswrapper[4932]: I1125 09:12:15.268082 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" containerName="nova-api-log" containerID="cri-o://5a9af99fec1a9e336010120706f5205cb52b0a56fb0e496623d4cdfc6e5717cd" gracePeriod=30 Nov 25 09:12:15 crc kubenswrapper[4932]: I1125 09:12:15.268336 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" containerName="nova-api-api" containerID="cri-o://21a5d0df1942575438c4ffeb87b83b3214bff47585c2d3fb6e8ea21f3a0abea3" gracePeriod=30 Nov 25 09:12:15 crc kubenswrapper[4932]: I1125 09:12:15.289779 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:12:15 crc kubenswrapper[4932]: I1125 09:12:15.290060 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="ac2f09cf-97e0-4446-9fc6-04bd3ffba71b" containerName="nova-scheduler-scheduler" containerID="cri-o://2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf" gracePeriod=30 Nov 25 09:12:15 crc kubenswrapper[4932]: I1125 09:12:15.400375 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:12:15 crc kubenswrapper[4932]: I1125 09:12:15.401074 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" containerName="nova-metadata-log" containerID="cri-o://105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0" gracePeriod=30 Nov 25 09:12:15 crc kubenswrapper[4932]: I1125 09:12:15.401205 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" containerName="nova-metadata-metadata" containerID="cri-o://acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73" gracePeriod=30 Nov 25 09:12:15 crc kubenswrapper[4932]: E1125 09:12:15.588877 4932 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4eee0a5_1322_498d_8a4d_f6cb76cafacf.slice/crio-conmon-5a9af99fec1a9e336010120706f5205cb52b0a56fb0e496623d4cdfc6e5717cd.scope\": RecentStats: unable to find data in memory cache]" Nov 25 09:12:16 crc kubenswrapper[4932]: I1125 09:12:16.079486 4932 generic.go:334] "Generic (PLEG): container finished" podID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" containerID="5a9af99fec1a9e336010120706f5205cb52b0a56fb0e496623d4cdfc6e5717cd" exitCode=143 Nov 25 09:12:16 crc kubenswrapper[4932]: I1125 09:12:16.079524 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c4eee0a5-1322-498d-8a4d-f6cb76cafacf","Type":"ContainerDied","Data":"5a9af99fec1a9e336010120706f5205cb52b0a56fb0e496623d4cdfc6e5717cd"} Nov 25 09:12:16 crc kubenswrapper[4932]: I1125 09:12:16.081914 
4932 generic.go:334] "Generic (PLEG): container finished" podID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" containerID="105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0" exitCode=143 Nov 25 09:12:16 crc kubenswrapper[4932]: I1125 09:12:16.081957 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a85938f2-0bc7-42f6-9b98-ceee092a8b19","Type":"ContainerDied","Data":"105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0"} Nov 25 09:12:16 crc kubenswrapper[4932]: I1125 09:12:16.939066 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.090783 4932 generic.go:334] "Generic (PLEG): container finished" podID="ac2f09cf-97e0-4446-9fc6-04bd3ffba71b" containerID="2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf" exitCode=0 Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.091094 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b","Type":"ContainerDied","Data":"2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf"} Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.091123 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b","Type":"ContainerDied","Data":"5d69eacfad374e39bf33d92e3276835c1d00c339cf257bab0a5bb120f4093378"} Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.091142 4932 scope.go:117] "RemoveContainer" containerID="2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.091326 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.093087 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-combined-ca-bundle\") pod \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\" (UID: \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\") " Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.093141 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pc9gj\" (UniqueName: \"kubernetes.io/projected/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-kube-api-access-pc9gj\") pod \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\" (UID: \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\") " Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.093329 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-config-data\") pod \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\" (UID: \"ac2f09cf-97e0-4446-9fc6-04bd3ffba71b\") " Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.101162 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-kube-api-access-pc9gj" (OuterVolumeSpecName: "kube-api-access-pc9gj") pod "ac2f09cf-97e0-4446-9fc6-04bd3ffba71b" (UID: "ac2f09cf-97e0-4446-9fc6-04bd3ffba71b"). InnerVolumeSpecName "kube-api-access-pc9gj". 
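The exitCode=143 entries above are the usual signature of a graceful stop: container exit codes encode fatal signals as 128 plus the signal number, and 143 - 128 = 15, i.e. SIGTERM, matching the "Killing container with a grace period" entries that preceded them. A tiny check making that arithmetic explicit:

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        exitCode := 143
        sig := syscall.Signal(exitCode - 128)
        fmt.Printf("exit %d => signal %d (%v)\n", exitCode, sig, sig) // signal 15 (terminated)
    }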
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.123125 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-config-data" (OuterVolumeSpecName: "config-data") pod "ac2f09cf-97e0-4446-9fc6-04bd3ffba71b" (UID: "ac2f09cf-97e0-4446-9fc6-04bd3ffba71b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.125715 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ac2f09cf-97e0-4446-9fc6-04bd3ffba71b" (UID: "ac2f09cf-97e0-4446-9fc6-04bd3ffba71b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.195588 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.195623 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pc9gj\" (UniqueName: \"kubernetes.io/projected/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-kube-api-access-pc9gj\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.195640 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.203394 4932 scope.go:117] "RemoveContainer" containerID="2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf" Nov 25 09:12:17 crc kubenswrapper[4932]: E1125 09:12:17.203711 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf\": container with ID starting with 2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf not found: ID does not exist" containerID="2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.203741 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf"} err="failed to get container status \"2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf\": rpc error: code = NotFound desc = could not find container \"2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf\": container with ID starting with 2983cf5e43a0bd2340480d31bdb57582c3ceea32a7b92a3059960c5588df67cf not found: ID does not exist" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.421214 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.430548 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.450694 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:12:17 crc kubenswrapper[4932]: E1125 09:12:17.451064 4932 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="ac2f09cf-97e0-4446-9fc6-04bd3ffba71b" containerName="nova-scheduler-scheduler" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.451083 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac2f09cf-97e0-4446-9fc6-04bd3ffba71b" containerName="nova-scheduler-scheduler" Nov 25 09:12:17 crc kubenswrapper[4932]: E1125 09:12:17.451097 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" containerName="dnsmasq-dns" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.451106 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" containerName="dnsmasq-dns" Nov 25 09:12:17 crc kubenswrapper[4932]: E1125 09:12:17.451130 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0fb7667-bb72-4856-9492-7c0f783f3a7f" containerName="nova-manage" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.451141 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0fb7667-bb72-4856-9492-7c0f783f3a7f" containerName="nova-manage" Nov 25 09:12:17 crc kubenswrapper[4932]: E1125 09:12:17.451168 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" containerName="init" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.451175 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" containerName="init" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.451393 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="da378a29-86a1-4b28-ba5e-37ec4f3e3fc8" containerName="dnsmasq-dns" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.451409 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac2f09cf-97e0-4446-9fc6-04bd3ffba71b" containerName="nova-scheduler-scheduler" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.451429 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0fb7667-bb72-4856-9492-7c0f783f3a7f" containerName="nova-manage" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.452034 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.466034 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.498932 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.602681 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/186ced68-a489-410c-afa6-d4d623c37fc1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"186ced68-a489-410c-afa6-d4d623c37fc1\") " pod="openstack/nova-scheduler-0" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.603007 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/186ced68-a489-410c-afa6-d4d623c37fc1-config-data\") pod \"nova-scheduler-0\" (UID: \"186ced68-a489-410c-afa6-d4d623c37fc1\") " pod="openstack/nova-scheduler-0" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.603091 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w94hg\" (UniqueName: \"kubernetes.io/projected/186ced68-a489-410c-afa6-d4d623c37fc1-kube-api-access-w94hg\") pod \"nova-scheduler-0\" (UID: \"186ced68-a489-410c-afa6-d4d623c37fc1\") " pod="openstack/nova-scheduler-0" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.705029 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/186ced68-a489-410c-afa6-d4d623c37fc1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"186ced68-a489-410c-afa6-d4d623c37fc1\") " pod="openstack/nova-scheduler-0" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.705154 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/186ced68-a489-410c-afa6-d4d623c37fc1-config-data\") pod \"nova-scheduler-0\" (UID: \"186ced68-a489-410c-afa6-d4d623c37fc1\") " pod="openstack/nova-scheduler-0" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.705178 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w94hg\" (UniqueName: \"kubernetes.io/projected/186ced68-a489-410c-afa6-d4d623c37fc1-kube-api-access-w94hg\") pod \"nova-scheduler-0\" (UID: \"186ced68-a489-410c-afa6-d4d623c37fc1\") " pod="openstack/nova-scheduler-0" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.710371 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/186ced68-a489-410c-afa6-d4d623c37fc1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"186ced68-a489-410c-afa6-d4d623c37fc1\") " pod="openstack/nova-scheduler-0" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.710846 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/186ced68-a489-410c-afa6-d4d623c37fc1-config-data\") pod \"nova-scheduler-0\" (UID: \"186ced68-a489-410c-afa6-d4d623c37fc1\") " pod="openstack/nova-scheduler-0" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.723698 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w94hg\" (UniqueName: 
\"kubernetes.io/projected/186ced68-a489-410c-afa6-d4d623c37fc1-kube-api-access-w94hg\") pod \"nova-scheduler-0\" (UID: \"186ced68-a489-410c-afa6-d4d623c37fc1\") " pod="openstack/nova-scheduler-0" Nov 25 09:12:17 crc kubenswrapper[4932]: I1125 09:12:17.826811 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:12:18 crc kubenswrapper[4932]: I1125 09:12:18.258730 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:12:18 crc kubenswrapper[4932]: W1125 09:12:18.268881 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod186ced68_a489_410c_afa6_d4d623c37fc1.slice/crio-2c95ef3ee48e374fa43a0d30f685f4e1d459ccc6e3f652192c87c848ba65d5cb WatchSource:0}: Error finding container 2c95ef3ee48e374fa43a0d30f685f4e1d459ccc6e3f652192c87c848ba65d5cb: Status 404 returned error can't find the container with id 2c95ef3ee48e374fa43a0d30f685f4e1d459ccc6e3f652192c87c848ba65d5cb Nov 25 09:12:18 crc kubenswrapper[4932]: I1125 09:12:18.621433 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac2f09cf-97e0-4446-9fc6-04bd3ffba71b" path="/var/lib/kubelet/pods/ac2f09cf-97e0-4446-9fc6-04bd3ffba71b/volumes" Nov 25 09:12:18 crc kubenswrapper[4932]: I1125 09:12:18.977457 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.030761 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a85938f2-0bc7-42f6-9b98-ceee092a8b19-logs\") pod \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.030872 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9vwn\" (UniqueName: \"kubernetes.io/projected/a85938f2-0bc7-42f6-9b98-ceee092a8b19-kube-api-access-f9vwn\") pod \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.030903 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-config-data\") pod \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.030940 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-nova-metadata-tls-certs\") pod \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.030985 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-combined-ca-bundle\") pod \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\" (UID: \"a85938f2-0bc7-42f6-9b98-ceee092a8b19\") " Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.031478 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a85938f2-0bc7-42f6-9b98-ceee092a8b19-logs" (OuterVolumeSpecName: "logs") pod "a85938f2-0bc7-42f6-9b98-ceee092a8b19" (UID: 
"a85938f2-0bc7-42f6-9b98-ceee092a8b19"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.031779 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a85938f2-0bc7-42f6-9b98-ceee092a8b19-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.056343 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a85938f2-0bc7-42f6-9b98-ceee092a8b19-kube-api-access-f9vwn" (OuterVolumeSpecName: "kube-api-access-f9vwn") pod "a85938f2-0bc7-42f6-9b98-ceee092a8b19" (UID: "a85938f2-0bc7-42f6-9b98-ceee092a8b19"). InnerVolumeSpecName "kube-api-access-f9vwn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.063055 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-config-data" (OuterVolumeSpecName: "config-data") pod "a85938f2-0bc7-42f6-9b98-ceee092a8b19" (UID: "a85938f2-0bc7-42f6-9b98-ceee092a8b19"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.097425 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a85938f2-0bc7-42f6-9b98-ceee092a8b19" (UID: "a85938f2-0bc7-42f6-9b98-ceee092a8b19"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.124991 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "a85938f2-0bc7-42f6-9b98-ceee092a8b19" (UID: "a85938f2-0bc7-42f6-9b98-ceee092a8b19"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.130282 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"186ced68-a489-410c-afa6-d4d623c37fc1","Type":"ContainerStarted","Data":"c2a6c3a1e0b539444b03c2c2b147c48f0e4e50e3895eb1146918d21fcc6cd271"} Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.130328 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"186ced68-a489-410c-afa6-d4d623c37fc1","Type":"ContainerStarted","Data":"2c95ef3ee48e374fa43a0d30f685f4e1d459ccc6e3f652192c87c848ba65d5cb"} Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.132164 4932 generic.go:334] "Generic (PLEG): container finished" podID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" containerID="acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73" exitCode=0 Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.132206 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a85938f2-0bc7-42f6-9b98-ceee092a8b19","Type":"ContainerDied","Data":"acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73"} Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.132224 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a85938f2-0bc7-42f6-9b98-ceee092a8b19","Type":"ContainerDied","Data":"174d597657371d0c0a031d32db846e277201febb711caebca0b962c3ec5d9d00"} Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.132240 4932 scope.go:117] "RemoveContainer" containerID="acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.132260 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.134020 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9vwn\" (UniqueName: \"kubernetes.io/projected/a85938f2-0bc7-42f6-9b98-ceee092a8b19-kube-api-access-f9vwn\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.134098 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.134121 4932 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.134137 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a85938f2-0bc7-42f6-9b98-ceee092a8b19-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.159658 4932 scope.go:117] "RemoveContainer" containerID="105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.178887 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.178865739 podStartE2EDuration="2.178865739s" podCreationTimestamp="2025-11-25 09:12:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:12:19.147555781 +0000 UTC m=+1399.273585364" watchObservedRunningTime="2025-11-25 09:12:19.178865739 +0000 UTC m=+1399.304895302" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.186565 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.192003 4932 scope.go:117] "RemoveContainer" containerID="acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73" Nov 25 09:12:19 crc kubenswrapper[4932]: E1125 09:12:19.193600 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73\": container with ID starting with acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73 not found: ID does not exist" containerID="acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.193652 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73"} err="failed to get container status \"acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73\": rpc error: code = NotFound desc = could not find container \"acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73\": container with ID starting with acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73 not found: ID does not exist" Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.193683 4932 scope.go:117] "RemoveContainer" containerID="105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0" Nov 25 09:12:19 crc kubenswrapper[4932]: E1125 09:12:19.194041 4932 log.go:32] "ContainerStatus 
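Aside (not from the captured log): the podStartSLOduration arithmetic above checks out: with no image pull recorded (firstStartedPulling is the zero time), the SLO duration is simply watchObservedRunningTime minus podCreationTimestamp. A sketch, with the running time truncated to microseconds:

from datetime import datetime, timezone

created = datetime(2025, 11, 25, 9, 12, 17, tzinfo=timezone.utc)
running = datetime(2025, 11, 25, 9, 12, 19, 178865, tzinfo=timezone.utc)
print((running - created).total_seconds())  # 2.178865, i.e. ~2.178865739s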
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.186565 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.192003 4932 scope.go:117] "RemoveContainer" containerID="acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73"
Nov 25 09:12:19 crc kubenswrapper[4932]: E1125 09:12:19.193600 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73\": container with ID starting with acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73 not found: ID does not exist" containerID="acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.193652 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73"} err="failed to get container status \"acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73\": rpc error: code = NotFound desc = could not find container \"acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73\": container with ID starting with acb06a9ccf6e364ed3cf61b4b227231e770e24530386eecf9b5dbd118dd14a73 not found: ID does not exist"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.193683 4932 scope.go:117] "RemoveContainer" containerID="105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0"
Nov 25 09:12:19 crc kubenswrapper[4932]: E1125 09:12:19.194041 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0\": container with ID starting with 105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0 not found: ID does not exist" containerID="105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.194065 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0"} err="failed to get container status \"105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0\": rpc error: code = NotFound desc = could not find container \"105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0\": container with ID starting with 105e71372bc5d823238329ac12155abba7594ccc88c8927a061d9c8d483ca3d0 not found: ID does not exist"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.203960 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.213306 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 09:12:19 crc kubenswrapper[4932]: E1125 09:12:19.213745 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" containerName="nova-metadata-metadata"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.213761 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" containerName="nova-metadata-metadata"
Nov 25 09:12:19 crc kubenswrapper[4932]: E1125 09:12:19.213797 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" containerName="nova-metadata-log"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.213804 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" containerName="nova-metadata-log"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.214025 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" containerName="nova-metadata-log"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.214044 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" containerName="nova-metadata-metadata"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.215075 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.220616 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.220797 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.223052 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.234885 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-478km\" (UniqueName: \"kubernetes.io/projected/90c30cef-5376-4f4a-8d59-9ab6daff902d-kube-api-access-478km\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.237119 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-config-data\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.237309 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90c30cef-5376-4f4a-8d59-9ab6daff902d-logs\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.237343 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.237404 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.339423 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-config-data\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.339518 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90c30cef-5376-4f4a-8d59-9ab6daff902d-logs\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.339540 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.339568 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.339633 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-478km\" (UniqueName: \"kubernetes.io/projected/90c30cef-5376-4f4a-8d59-9ab6daff902d-kube-api-access-478km\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.340130 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90c30cef-5376-4f4a-8d59-9ab6daff902d-logs\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.343810 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.344473 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-config-data\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.344896 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.359381 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-478km\" (UniqueName: \"kubernetes.io/projected/90c30cef-5376-4f4a-8d59-9ab6daff902d-kube-api-access-478km\") pod \"nova-metadata-0\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") " pod="openstack/nova-metadata-0"
Nov 25 09:12:19 crc kubenswrapper[4932]: I1125 09:12:19.541145 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.028343 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.147354 4932 generic.go:334] "Generic (PLEG): container finished" podID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" containerID="21a5d0df1942575438c4ffeb87b83b3214bff47585c2d3fb6e8ea21f3a0abea3" exitCode=0
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.147541 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c4eee0a5-1322-498d-8a4d-f6cb76cafacf","Type":"ContainerDied","Data":"21a5d0df1942575438c4ffeb87b83b3214bff47585c2d3fb6e8ea21f3a0abea3"}
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.147774 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c4eee0a5-1322-498d-8a4d-f6cb76cafacf","Type":"ContainerDied","Data":"e4beb8b986568b5b97769224499ae0574f2b83556bc8f55fe93c220cbdb2e599"}
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.147794 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4beb8b986568b5b97769224499ae0574f2b83556bc8f55fe93c220cbdb2e599"
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.149807 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"90c30cef-5376-4f4a-8d59-9ab6daff902d","Type":"ContainerStarted","Data":"ceb3b42ad1b5a2d2807be40562bba836fb84b40cd4d5af68cf85ad5b467e9bd1"}
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.150691 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.153223 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-config-data\") pod \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") "
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.153288 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-combined-ca-bundle\") pod \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") "
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.153313 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-logs\") pod \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") "
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.153344 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-public-tls-certs\") pod \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") "
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.153361 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-internal-tls-certs\") pod \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") "
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.153384 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2mlz\" (UniqueName: \"kubernetes.io/projected/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-kube-api-access-v2mlz\") pod \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\" (UID: \"c4eee0a5-1322-498d-8a4d-f6cb76cafacf\") "
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.155954 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-logs" (OuterVolumeSpecName: "logs") pod "c4eee0a5-1322-498d-8a4d-f6cb76cafacf" (UID: "c4eee0a5-1322-498d-8a4d-f6cb76cafacf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.159884 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-kube-api-access-v2mlz" (OuterVolumeSpecName: "kube-api-access-v2mlz") pod "c4eee0a5-1322-498d-8a4d-f6cb76cafacf" (UID: "c4eee0a5-1322-498d-8a4d-f6cb76cafacf"). InnerVolumeSpecName "kube-api-access-v2mlz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.215268 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-config-data" (OuterVolumeSpecName: "config-data") pod "c4eee0a5-1322-498d-8a4d-f6cb76cafacf" (UID: "c4eee0a5-1322-498d-8a4d-f6cb76cafacf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.216736 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c4eee0a5-1322-498d-8a4d-f6cb76cafacf" (UID: "c4eee0a5-1322-498d-8a4d-f6cb76cafacf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.219644 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c4eee0a5-1322-498d-8a4d-f6cb76cafacf" (UID: "c4eee0a5-1322-498d-8a4d-f6cb76cafacf"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.248856 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c4eee0a5-1322-498d-8a4d-f6cb76cafacf" (UID: "c4eee0a5-1322-498d-8a4d-f6cb76cafacf"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.255664 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.255705 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.255720 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-logs\") on node \"crc\" DevicePath \"\""
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.255732 4932 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-public-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.255744 4932 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.255755 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2mlz\" (UniqueName: \"kubernetes.io/projected/c4eee0a5-1322-498d-8a4d-f6cb76cafacf-kube-api-access-v2mlz\") on node \"crc\" DevicePath \"\""
Nov 25 09:12:20 crc kubenswrapper[4932]: I1125 09:12:20.637561 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a85938f2-0bc7-42f6-9b98-ceee092a8b19" path="/var/lib/kubelet/pods/a85938f2-0bc7-42f6-9b98-ceee092a8b19/volumes"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.172819 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"90c30cef-5376-4f4a-8d59-9ab6daff902d","Type":"ContainerStarted","Data":"7d27611ad3f8e0e548937326ec5872d5fd17ef030c916731538091ee33f8c092"}
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.172923 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"90c30cef-5376-4f4a-8d59-9ab6daff902d","Type":"ContainerStarted","Data":"ef08398ee58bcf8e60c93b50283ca6afdcbca3b7a33b1eac1c91a89fe5b90230"}
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.172844 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.203831 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.203804441 podStartE2EDuration="2.203804441s" podCreationTimestamp="2025-11-25 09:12:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:12:21.198988172 +0000 UTC m=+1401.325017745" watchObservedRunningTime="2025-11-25 09:12:21.203804441 +0000 UTC m=+1401.329834024"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.226734 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.245324 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.254563 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 25 09:12:21 crc kubenswrapper[4932]: E1125 09:12:21.255246 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" containerName="nova-api-log"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.255345 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" containerName="nova-api-log"
Nov 25 09:12:21 crc kubenswrapper[4932]: E1125 09:12:21.255435 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" containerName="nova-api-api"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.255500 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" containerName="nova-api-api"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.255796 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" containerName="nova-api-log"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.255898 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" containerName="nova-api-api"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.257268 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.261996 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.262351 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.262861 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.264249 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.379508 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-public-tls-certs\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.379550 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dvsc\" (UniqueName: \"kubernetes.io/projected/31823923-9ce9-49e0-b4c1-42418d49918c-kube-api-access-9dvsc\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.379592 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31823923-9ce9-49e0-b4c1-42418d49918c-logs\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.379659 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.380031 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-config-data\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.380206 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.482203 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-public-tls-certs\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.482269 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dvsc\" (UniqueName: \"kubernetes.io/projected/31823923-9ce9-49e0-b4c1-42418d49918c-kube-api-access-9dvsc\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.482321 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31823923-9ce9-49e0-b4c1-42418d49918c-logs\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.482389 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.482442 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-config-data\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.482482 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.483008 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31823923-9ce9-49e0-b4c1-42418d49918c-logs\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.488457 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.488457 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-internal-tls-certs\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.490066 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-config-data\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.492581 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-public-tls-certs\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.508477 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dvsc\" (UniqueName: \"kubernetes.io/projected/31823923-9ce9-49e0-b4c1-42418d49918c-kube-api-access-9dvsc\") pod \"nova-api-0\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") " pod="openstack/nova-api-0"
Nov 25 09:12:21 crc kubenswrapper[4932]: I1125 09:12:21.579545 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 09:12:22 crc kubenswrapper[4932]: I1125 09:12:22.019259 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 09:12:22 crc kubenswrapper[4932]: I1125 09:12:22.185992 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31823923-9ce9-49e0-b4c1-42418d49918c","Type":"ContainerStarted","Data":"9364cfc667ba84209a56fce2ce4da8c6edded2af654c8375e60e8d2e2d7cced1"}
Nov 25 09:12:22 crc kubenswrapper[4932]: I1125 09:12:22.624091 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4eee0a5-1322-498d-8a4d-f6cb76cafacf" path="/var/lib/kubelet/pods/c4eee0a5-1322-498d-8a4d-f6cb76cafacf/volumes"
Nov 25 09:12:22 crc kubenswrapper[4932]: I1125 09:12:22.827809 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 25 09:12:23 crc kubenswrapper[4932]: I1125 09:12:23.197588 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31823923-9ce9-49e0-b4c1-42418d49918c","Type":"ContainerStarted","Data":"7247d6a20300098ab3cb5a4ccdeaecb8b01f9585ec29af77a8b23a178fb313d8"}
Nov 25 09:12:23 crc kubenswrapper[4932]: I1125 09:12:23.197652 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31823923-9ce9-49e0-b4c1-42418d49918c","Type":"ContainerStarted","Data":"4208013c150414e2c4e6a9db4af0a0ed4445d68f363fa9be56e13050961d4b79"}
Nov 25 09:12:23 crc kubenswrapper[4932]: I1125 09:12:23.241911 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.24187895 podStartE2EDuration="2.24187895s" podCreationTimestamp="2025-11-25 09:12:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:12:23.224181132 +0000 UTC m=+1403.350210785" watchObservedRunningTime="2025-11-25 09:12:23.24187895 +0000 UTC m=+1403.367908553"
Nov 25 09:12:24 crc kubenswrapper[4932]: I1125 09:12:24.541659 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 09:12:24 crc kubenswrapper[4932]: I1125 09:12:24.542149 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 09:12:27 crc kubenswrapper[4932]: I1125 09:12:27.828332 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 25 09:12:27 crc kubenswrapper[4932]: I1125 09:12:27.856044 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 25 09:12:28 crc kubenswrapper[4932]: I1125 09:12:28.316968 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 25 09:12:29 crc kubenswrapper[4932]: I1125 09:12:29.541422 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 09:12:29 crc kubenswrapper[4932]: I1125 09:12:29.541740 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 09:12:30 crc kubenswrapper[4932]: I1125 09:12:30.555546 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 09:12:30 crc kubenswrapper[4932]: I1125 09:12:30.555558 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 09:12:31 crc kubenswrapper[4932]: I1125 09:12:31.580410 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 09:12:31 crc kubenswrapper[4932]: I1125 09:12:31.580699 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 09:12:32 crc kubenswrapper[4932]: I1125 09:12:32.594372 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="31823923-9ce9-49e0-b4c1-42418d49918c" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.208:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 09:12:32 crc kubenswrapper[4932]: I1125 09:12:32.594399 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="31823923-9ce9-49e0-b4c1-42418d49918c" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.208:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 09:12:34 crc kubenswrapper[4932]: I1125 09:12:34.430403 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.599507 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lkbcm"]
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.602983 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lkbcm"
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.618684 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b649a40-7fa0-4276-94eb-1a612f661578-utilities\") pod \"redhat-marketplace-lkbcm\" (UID: \"3b649a40-7fa0-4276-94eb-1a612f661578\") " pod="openshift-marketplace/redhat-marketplace-lkbcm"
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.618771 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgckr\" (UniqueName: \"kubernetes.io/projected/3b649a40-7fa0-4276-94eb-1a612f661578-kube-api-access-mgckr\") pod \"redhat-marketplace-lkbcm\" (UID: \"3b649a40-7fa0-4276-94eb-1a612f661578\") " pod="openshift-marketplace/redhat-marketplace-lkbcm"
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.618933 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b649a40-7fa0-4276-94eb-1a612f661578-catalog-content\") pod \"redhat-marketplace-lkbcm\" (UID: \"3b649a40-7fa0-4276-94eb-1a612f661578\") " pod="openshift-marketplace/redhat-marketplace-lkbcm"
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.661872 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lkbcm"]
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.749748 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b649a40-7fa0-4276-94eb-1a612f661578-utilities\") pod \"redhat-marketplace-lkbcm\" (UID: \"3b649a40-7fa0-4276-94eb-1a612f661578\") " pod="openshift-marketplace/redhat-marketplace-lkbcm"
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.749796 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgckr\" (UniqueName: \"kubernetes.io/projected/3b649a40-7fa0-4276-94eb-1a612f661578-kube-api-access-mgckr\") pod \"redhat-marketplace-lkbcm\" (UID: \"3b649a40-7fa0-4276-94eb-1a612f661578\") " pod="openshift-marketplace/redhat-marketplace-lkbcm"
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.749835 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b649a40-7fa0-4276-94eb-1a612f661578-catalog-content\") pod \"redhat-marketplace-lkbcm\" (UID: \"3b649a40-7fa0-4276-94eb-1a612f661578\") " pod="openshift-marketplace/redhat-marketplace-lkbcm"
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.750437 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b649a40-7fa0-4276-94eb-1a612f661578-utilities\") pod \"redhat-marketplace-lkbcm\" (UID: \"3b649a40-7fa0-4276-94eb-1a612f661578\") " pod="openshift-marketplace/redhat-marketplace-lkbcm"
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.750464 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b649a40-7fa0-4276-94eb-1a612f661578-catalog-content\") pod \"redhat-marketplace-lkbcm\" (UID: \"3b649a40-7fa0-4276-94eb-1a612f661578\") " pod="openshift-marketplace/redhat-marketplace-lkbcm"
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.783960 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgckr\" (UniqueName: \"kubernetes.io/projected/3b649a40-7fa0-4276-94eb-1a612f661578-kube-api-access-mgckr\") pod \"redhat-marketplace-lkbcm\" (UID: \"3b649a40-7fa0-4276-94eb-1a612f661578\") " pod="openshift-marketplace/redhat-marketplace-lkbcm"
Nov 25 09:12:38 crc kubenswrapper[4932]: I1125 09:12:38.941721 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lkbcm"
Nov 25 09:12:39 crc kubenswrapper[4932]: W1125 09:12:39.419591 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b649a40_7fa0_4276_94eb_1a612f661578.slice/crio-c70be004a5bd21a1d679a4011aed6f8250e4a1c9ede7f01f92f9408ce3e4c154 WatchSource:0}: Error finding container c70be004a5bd21a1d679a4011aed6f8250e4a1c9ede7f01f92f9408ce3e4c154: Status 404 returned error can't find the container with id c70be004a5bd21a1d679a4011aed6f8250e4a1c9ede7f01f92f9408ce3e4c154
Nov 25 09:12:39 crc kubenswrapper[4932]: I1125 09:12:39.425520 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lkbcm"]
Nov 25 09:12:39 crc kubenswrapper[4932]: I1125 09:12:39.546729 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 25 09:12:39 crc kubenswrapper[4932]: I1125 09:12:39.547569 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 25 09:12:39 crc kubenswrapper[4932]: I1125 09:12:39.553464 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 25 09:12:40 crc kubenswrapper[4932]: I1125 09:12:40.391827 4932 generic.go:334] "Generic (PLEG): container finished" podID="3b649a40-7fa0-4276-94eb-1a612f661578" containerID="4bf937effda2558a987ce6403f367f8e2502043750c10cd24461ca0e98f7c8db" exitCode=0
Nov 25 09:12:40 crc kubenswrapper[4932]: I1125 09:12:40.391884 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkbcm" event={"ID":"3b649a40-7fa0-4276-94eb-1a612f661578","Type":"ContainerDied","Data":"4bf937effda2558a987ce6403f367f8e2502043750c10cd24461ca0e98f7c8db"}
Nov 25 09:12:40 crc kubenswrapper[4932]: I1125 09:12:40.392987 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkbcm" event={"ID":"3b649a40-7fa0-4276-94eb-1a612f661578","Type":"ContainerStarted","Data":"c70be004a5bd21a1d679a4011aed6f8250e4a1c9ede7f01f92f9408ce3e4c154"}
Nov 25 09:12:40 crc kubenswrapper[4932]: I1125 09:12:40.403671 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 25 09:12:41 crc kubenswrapper[4932]: I1125 09:12:41.402730 4932 generic.go:334] "Generic (PLEG): container finished" podID="3b649a40-7fa0-4276-94eb-1a612f661578" containerID="b14798d5917cc61f3b00ba7f8d85b2da7547a76549fb09217f40aeeecb75dba8" exitCode=0
Nov 25 09:12:41 crc kubenswrapper[4932]: I1125 09:12:41.402838 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkbcm" event={"ID":"3b649a40-7fa0-4276-94eb-1a612f661578","Type":"ContainerDied","Data":"b14798d5917cc61f3b00ba7f8d85b2da7547a76549fb09217f40aeeecb75dba8"}
Nov 25 09:12:41 crc kubenswrapper[4932]: I1125 09:12:41.587918 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 25 09:12:41 crc kubenswrapper[4932]: I1125 09:12:41.588487 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 25 09:12:41 crc kubenswrapper[4932]: I1125 09:12:41.588848 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 25 09:12:41 crc kubenswrapper[4932]: I1125 09:12:41.593411 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 25 09:12:42 crc kubenswrapper[4932]: I1125 09:12:42.426486 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkbcm" event={"ID":"3b649a40-7fa0-4276-94eb-1a612f661578","Type":"ContainerStarted","Data":"b7cb9e0fcf1395448c9b31b92e3649e1fb2a13680be102dbbfd00156fde395b7"}
Nov 25 09:12:42 crc kubenswrapper[4932]: I1125 09:12:42.426792 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 25 09:12:42 crc kubenswrapper[4932]: I1125 09:12:42.436109 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 25 09:12:42 crc kubenswrapper[4932]: I1125 09:12:42.453399 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lkbcm" podStartSLOduration=2.973319386 podStartE2EDuration="4.453381079s" podCreationTimestamp="2025-11-25 09:12:38 +0000 UTC" firstStartedPulling="2025-11-25 09:12:40.394356369 +0000 UTC m=+1420.520385952" lastFinishedPulling="2025-11-25 09:12:41.874418052 +0000 UTC m=+1422.000447645" observedRunningTime="2025-11-25 09:12:42.445023979 +0000 UTC m=+1422.571053552" watchObservedRunningTime="2025-11-25 09:12:42.453381079 +0000 UTC m=+1422.579410642"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lkbcm" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.242227 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b649a40-7fa0-4276-94eb-1a612f661578-catalog-content\") pod \"3b649a40-7fa0-4276-94eb-1a612f661578\" (UID: \"3b649a40-7fa0-4276-94eb-1a612f661578\") " Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.242441 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgckr\" (UniqueName: \"kubernetes.io/projected/3b649a40-7fa0-4276-94eb-1a612f661578-kube-api-access-mgckr\") pod \"3b649a40-7fa0-4276-94eb-1a612f661578\" (UID: \"3b649a40-7fa0-4276-94eb-1a612f661578\") " Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.242483 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b649a40-7fa0-4276-94eb-1a612f661578-utilities\") pod \"3b649a40-7fa0-4276-94eb-1a612f661578\" (UID: \"3b649a40-7fa0-4276-94eb-1a612f661578\") " Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.243611 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b649a40-7fa0-4276-94eb-1a612f661578-utilities" (OuterVolumeSpecName: "utilities") pod "3b649a40-7fa0-4276-94eb-1a612f661578" (UID: "3b649a40-7fa0-4276-94eb-1a612f661578"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.248349 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b649a40-7fa0-4276-94eb-1a612f661578-kube-api-access-mgckr" (OuterVolumeSpecName: "kube-api-access-mgckr") pod "3b649a40-7fa0-4276-94eb-1a612f661578" (UID: "3b649a40-7fa0-4276-94eb-1a612f661578"). InnerVolumeSpecName "kube-api-access-mgckr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.263749 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b649a40-7fa0-4276-94eb-1a612f661578-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3b649a40-7fa0-4276-94eb-1a612f661578" (UID: "3b649a40-7fa0-4276-94eb-1a612f661578"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.345518 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b649a40-7fa0-4276-94eb-1a612f661578-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.345600 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgckr\" (UniqueName: \"kubernetes.io/projected/3b649a40-7fa0-4276-94eb-1a612f661578-kube-api-access-mgckr\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.345613 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b649a40-7fa0-4276-94eb-1a612f661578-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.580978 4932 generic.go:334] "Generic (PLEG): container finished" podID="3b649a40-7fa0-4276-94eb-1a612f661578" containerID="b7cb9e0fcf1395448c9b31b92e3649e1fb2a13680be102dbbfd00156fde395b7" exitCode=0 Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.581022 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkbcm" event={"ID":"3b649a40-7fa0-4276-94eb-1a612f661578","Type":"ContainerDied","Data":"b7cb9e0fcf1395448c9b31b92e3649e1fb2a13680be102dbbfd00156fde395b7"} Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.581082 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lkbcm" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.581101 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lkbcm" event={"ID":"3b649a40-7fa0-4276-94eb-1a612f661578","Type":"ContainerDied","Data":"c70be004a5bd21a1d679a4011aed6f8250e4a1c9ede7f01f92f9408ce3e4c154"} Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.581144 4932 scope.go:117] "RemoveContainer" containerID="b7cb9e0fcf1395448c9b31b92e3649e1fb2a13680be102dbbfd00156fde395b7" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.608478 4932 scope.go:117] "RemoveContainer" containerID="b14798d5917cc61f3b00ba7f8d85b2da7547a76549fb09217f40aeeecb75dba8" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.633857 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lkbcm"] Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.645024 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lkbcm"] Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.661955 4932 scope.go:117] "RemoveContainer" containerID="4bf937effda2558a987ce6403f367f8e2502043750c10cd24461ca0e98f7c8db" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.697322 4932 scope.go:117] "RemoveContainer" containerID="b7cb9e0fcf1395448c9b31b92e3649e1fb2a13680be102dbbfd00156fde395b7" Nov 25 09:12:52 crc kubenswrapper[4932]: E1125 09:12:52.698075 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7cb9e0fcf1395448c9b31b92e3649e1fb2a13680be102dbbfd00156fde395b7\": container with ID starting with b7cb9e0fcf1395448c9b31b92e3649e1fb2a13680be102dbbfd00156fde395b7 not found: ID does not exist" containerID="b7cb9e0fcf1395448c9b31b92e3649e1fb2a13680be102dbbfd00156fde395b7" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.698133 4932 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7cb9e0fcf1395448c9b31b92e3649e1fb2a13680be102dbbfd00156fde395b7"} err="failed to get container status \"b7cb9e0fcf1395448c9b31b92e3649e1fb2a13680be102dbbfd00156fde395b7\": rpc error: code = NotFound desc = could not find container \"b7cb9e0fcf1395448c9b31b92e3649e1fb2a13680be102dbbfd00156fde395b7\": container with ID starting with b7cb9e0fcf1395448c9b31b92e3649e1fb2a13680be102dbbfd00156fde395b7 not found: ID does not exist" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.698174 4932 scope.go:117] "RemoveContainer" containerID="b14798d5917cc61f3b00ba7f8d85b2da7547a76549fb09217f40aeeecb75dba8" Nov 25 09:12:52 crc kubenswrapper[4932]: E1125 09:12:52.698814 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b14798d5917cc61f3b00ba7f8d85b2da7547a76549fb09217f40aeeecb75dba8\": container with ID starting with b14798d5917cc61f3b00ba7f8d85b2da7547a76549fb09217f40aeeecb75dba8 not found: ID does not exist" containerID="b14798d5917cc61f3b00ba7f8d85b2da7547a76549fb09217f40aeeecb75dba8" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.698864 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b14798d5917cc61f3b00ba7f8d85b2da7547a76549fb09217f40aeeecb75dba8"} err="failed to get container status \"b14798d5917cc61f3b00ba7f8d85b2da7547a76549fb09217f40aeeecb75dba8\": rpc error: code = NotFound desc = could not find container \"b14798d5917cc61f3b00ba7f8d85b2da7547a76549fb09217f40aeeecb75dba8\": container with ID starting with b14798d5917cc61f3b00ba7f8d85b2da7547a76549fb09217f40aeeecb75dba8 not found: ID does not exist" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.698902 4932 scope.go:117] "RemoveContainer" containerID="4bf937effda2558a987ce6403f367f8e2502043750c10cd24461ca0e98f7c8db" Nov 25 09:12:52 crc kubenswrapper[4932]: E1125 09:12:52.699373 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bf937effda2558a987ce6403f367f8e2502043750c10cd24461ca0e98f7c8db\": container with ID starting with 4bf937effda2558a987ce6403f367f8e2502043750c10cd24461ca0e98f7c8db not found: ID does not exist" containerID="4bf937effda2558a987ce6403f367f8e2502043750c10cd24461ca0e98f7c8db" Nov 25 09:12:52 crc kubenswrapper[4932]: I1125 09:12:52.699443 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bf937effda2558a987ce6403f367f8e2502043750c10cd24461ca0e98f7c8db"} err="failed to get container status \"4bf937effda2558a987ce6403f367f8e2502043750c10cd24461ca0e98f7c8db\": rpc error: code = NotFound desc = could not find container \"4bf937effda2558a987ce6403f367f8e2502043750c10cd24461ca0e98f7c8db\": container with ID starting with 4bf937effda2558a987ce6403f367f8e2502043750c10cd24461ca0e98f7c8db not found: ID does not exist" Nov 25 09:12:54 crc kubenswrapper[4932]: I1125 09:12:54.629466 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b649a40-7fa0-4276-94eb-1a612f661578" path="/var/lib/kubelet/pods/3b649a40-7fa0-4276-94eb-1a612f661578/volumes" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.403382 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gxjgb"] Nov 25 09:12:55 crc kubenswrapper[4932]: E1125 09:12:55.403960 4932 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="3b649a40-7fa0-4276-94eb-1a612f661578" containerName="registry-server" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.403991 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b649a40-7fa0-4276-94eb-1a612f661578" containerName="registry-server" Nov 25 09:12:55 crc kubenswrapper[4932]: E1125 09:12:55.404060 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b649a40-7fa0-4276-94eb-1a612f661578" containerName="extract-utilities" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.404076 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b649a40-7fa0-4276-94eb-1a612f661578" containerName="extract-utilities" Nov 25 09:12:55 crc kubenswrapper[4932]: E1125 09:12:55.404099 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b649a40-7fa0-4276-94eb-1a612f661578" containerName="extract-content" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.404109 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b649a40-7fa0-4276-94eb-1a612f661578" containerName="extract-content" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.404450 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b649a40-7fa0-4276-94eb-1a612f661578" containerName="registry-server" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.408746 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.418949 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gxjgb"] Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.515355 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84b6148f-b9bf-41ef-a1ba-c282f94882ee-catalog-content\") pod \"redhat-operators-gxjgb\" (UID: \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\") " pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.515894 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9npt\" (UniqueName: \"kubernetes.io/projected/84b6148f-b9bf-41ef-a1ba-c282f94882ee-kube-api-access-z9npt\") pod \"redhat-operators-gxjgb\" (UID: \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\") " pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.516173 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84b6148f-b9bf-41ef-a1ba-c282f94882ee-utilities\") pod \"redhat-operators-gxjgb\" (UID: \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\") " pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.617878 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84b6148f-b9bf-41ef-a1ba-c282f94882ee-utilities\") pod \"redhat-operators-gxjgb\" (UID: \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\") " pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.617969 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84b6148f-b9bf-41ef-a1ba-c282f94882ee-catalog-content\") pod \"redhat-operators-gxjgb\" (UID: 
\"84b6148f-b9bf-41ef-a1ba-c282f94882ee\") " pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.618067 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9npt\" (UniqueName: \"kubernetes.io/projected/84b6148f-b9bf-41ef-a1ba-c282f94882ee-kube-api-access-z9npt\") pod \"redhat-operators-gxjgb\" (UID: \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\") " pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.619003 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84b6148f-b9bf-41ef-a1ba-c282f94882ee-utilities\") pod \"redhat-operators-gxjgb\" (UID: \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\") " pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.619516 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84b6148f-b9bf-41ef-a1ba-c282f94882ee-catalog-content\") pod \"redhat-operators-gxjgb\" (UID: \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\") " pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.638198 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9npt\" (UniqueName: \"kubernetes.io/projected/84b6148f-b9bf-41ef-a1ba-c282f94882ee-kube-api-access-z9npt\") pod \"redhat-operators-gxjgb\" (UID: \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\") " pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:12:55 crc kubenswrapper[4932]: I1125 09:12:55.780565 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:12:56 crc kubenswrapper[4932]: I1125 09:12:56.211553 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gxjgb"] Nov 25 09:12:56 crc kubenswrapper[4932]: W1125 09:12:56.215044 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod84b6148f_b9bf_41ef_a1ba_c282f94882ee.slice/crio-615ae82b4ad7794a30e582433291f8eaf5d40716fe8b7598f7954cf40108bfa5 WatchSource:0}: Error finding container 615ae82b4ad7794a30e582433291f8eaf5d40716fe8b7598f7954cf40108bfa5: Status 404 returned error can't find the container with id 615ae82b4ad7794a30e582433291f8eaf5d40716fe8b7598f7954cf40108bfa5 Nov 25 09:12:56 crc kubenswrapper[4932]: I1125 09:12:56.630330 4932 generic.go:334] "Generic (PLEG): container finished" podID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" containerID="f82c61ca49ceda5f978194d115d5b068d1ddf690d8be385dc6e6283a09b7e99d" exitCode=0 Nov 25 09:12:56 crc kubenswrapper[4932]: I1125 09:12:56.630426 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjgb" event={"ID":"84b6148f-b9bf-41ef-a1ba-c282f94882ee","Type":"ContainerDied","Data":"f82c61ca49ceda5f978194d115d5b068d1ddf690d8be385dc6e6283a09b7e99d"} Nov 25 09:12:56 crc kubenswrapper[4932]: I1125 09:12:56.630665 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjgb" event={"ID":"84b6148f-b9bf-41ef-a1ba-c282f94882ee","Type":"ContainerStarted","Data":"615ae82b4ad7794a30e582433291f8eaf5d40716fe8b7598f7954cf40108bfa5"} Nov 25 09:12:57 crc kubenswrapper[4932]: I1125 09:12:57.645457 4932 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/redhat-operators-gxjgb" event={"ID":"84b6148f-b9bf-41ef-a1ba-c282f94882ee","Type":"ContainerStarted","Data":"312a0e5b311e554233c3015142912fa04dde42e203d28aadec4ab22b96d11220"} Nov 25 09:12:59 crc kubenswrapper[4932]: I1125 09:12:59.679086 4932 generic.go:334] "Generic (PLEG): container finished" podID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" containerID="312a0e5b311e554233c3015142912fa04dde42e203d28aadec4ab22b96d11220" exitCode=0 Nov 25 09:12:59 crc kubenswrapper[4932]: I1125 09:12:59.679300 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjgb" event={"ID":"84b6148f-b9bf-41ef-a1ba-c282f94882ee","Type":"ContainerDied","Data":"312a0e5b311e554233c3015142912fa04dde42e203d28aadec4ab22b96d11220"} Nov 25 09:13:00 crc kubenswrapper[4932]: I1125 09:13:00.689953 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjgb" event={"ID":"84b6148f-b9bf-41ef-a1ba-c282f94882ee","Type":"ContainerStarted","Data":"30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270"} Nov 25 09:13:00 crc kubenswrapper[4932]: I1125 09:13:00.716733 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gxjgb" podStartSLOduration=2.013349526 podStartE2EDuration="5.716713954s" podCreationTimestamp="2025-11-25 09:12:55 +0000 UTC" firstStartedPulling="2025-11-25 09:12:56.631770348 +0000 UTC m=+1436.757799901" lastFinishedPulling="2025-11-25 09:13:00.335134756 +0000 UTC m=+1440.461164329" observedRunningTime="2025-11-25 09:13:00.705117057 +0000 UTC m=+1440.831146640" watchObservedRunningTime="2025-11-25 09:13:00.716713954 +0000 UTC m=+1440.842743517" Nov 25 09:13:03 crc kubenswrapper[4932]: I1125 09:13:03.957677 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 25 09:13:03 crc kubenswrapper[4932]: I1125 09:13:03.958372 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="28020cd8-f0a6-4aa9-80e6-4aa92b554850" containerName="openstackclient" containerID="cri-o://96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544" gracePeriod=2 Nov 25 09:13:03 crc kubenswrapper[4932]: I1125 09:13:03.969363 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.125962 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.126342 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" containerName="openstack-network-exporter" containerID="cri-o://3aa7ad743c9d91e2340b3e3408429966ea1670cfd6e520674b81c2217ef12e5e" gracePeriod=300 Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.214095 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placementb0bd-account-delete-vxbgw"] Nov 25 09:13:04 crc kubenswrapper[4932]: E1125 09:13:04.214705 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28020cd8-f0a6-4aa9-80e6-4aa92b554850" containerName="openstackclient" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.214765 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="28020cd8-f0a6-4aa9-80e6-4aa92b554850" containerName="openstackclient" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.215015 4932 
memory_manager.go:354] "RemoveStaleState removing state" podUID="28020cd8-f0a6-4aa9-80e6-4aa92b554850" containerName="openstackclient" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.215665 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placementb0bd-account-delete-vxbgw" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.235760 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.262817 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placementb0bd-account-delete-vxbgw"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.311987 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce711acf-071a-4387-8c42-e2f3f8c25df9-operator-scripts\") pod \"placementb0bd-account-delete-vxbgw\" (UID: \"ce711acf-071a-4387-8c42-e2f3f8c25df9\") " pod="openstack/placementb0bd-account-delete-vxbgw" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.312107 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tw5xn\" (UniqueName: \"kubernetes.io/projected/ce711acf-071a-4387-8c42-e2f3f8c25df9-kube-api-access-tw5xn\") pod \"placementb0bd-account-delete-vxbgw\" (UID: \"ce711acf-071a-4387-8c42-e2f3f8c25df9\") " pod="openstack/placementb0bd-account-delete-vxbgw" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.324954 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.325167 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" containerName="openstack-network-exporter" containerID="cri-o://385bb0f63503360fe1dd3b8bc517012f4e561bae2dc4d40f0fb11f4b6501c4c1" gracePeriod=30 Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.325511 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" containerName="ovn-northd" containerID="cri-o://2cf6819b94d62fccf47fe857c4bedbcb3672a422e4bfda3c5103104951af3ed6" gracePeriod=30 Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.347721 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron4147-account-delete-qzg4q"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.349319 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron4147-account-delete-qzg4q" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.363819 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-gqn2r"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.374808 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron4147-account-delete-qzg4q"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.385393 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-gqn2r"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.395878 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder71a7-account-delete-wslds"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.398782 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder71a7-account-delete-wslds" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.414324 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce711acf-071a-4387-8c42-e2f3f8c25df9-operator-scripts\") pod \"placementb0bd-account-delete-vxbgw\" (UID: \"ce711acf-071a-4387-8c42-e2f3f8c25df9\") " pod="openstack/placementb0bd-account-delete-vxbgw" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.414488 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tw5xn\" (UniqueName: \"kubernetes.io/projected/ce711acf-071a-4387-8c42-e2f3f8c25df9-kube-api-access-tw5xn\") pod \"placementb0bd-account-delete-vxbgw\" (UID: \"ce711acf-071a-4387-8c42-e2f3f8c25df9\") " pod="openstack/placementb0bd-account-delete-vxbgw" Nov 25 09:13:04 crc kubenswrapper[4932]: E1125 09:13:04.415330 4932 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 25 09:13:04 crc kubenswrapper[4932]: E1125 09:13:04.415383 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data podName:f41b25a4-f48e-4938-9c23-0d89751af6ae nodeName:}" failed. No retries permitted until 2025-11-25 09:13:04.915367785 +0000 UTC m=+1445.041397348 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data") pod "rabbitmq-cell1-server-0" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae") : configmap "rabbitmq-cell1-config-data" not found Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.416327 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce711acf-071a-4387-8c42-e2f3f8c25df9-operator-scripts\") pod \"placementb0bd-account-delete-vxbgw\" (UID: \"ce711acf-071a-4387-8c42-e2f3f8c25df9\") " pod="openstack/placementb0bd-account-delete-vxbgw" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.435799 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-k6hqv"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.448021 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder71a7-account-delete-wslds"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.462617 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-k6hqv"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.468573 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tw5xn\" (UniqueName: \"kubernetes.io/projected/ce711acf-071a-4387-8c42-e2f3f8c25df9-kube-api-access-tw5xn\") pod \"placementb0bd-account-delete-vxbgw\" (UID: \"ce711acf-071a-4387-8c42-e2f3f8c25df9\") " pod="openstack/placementb0bd-account-delete-vxbgw" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.477825 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" containerName="ovsdbserver-sb" containerID="cri-o://cd839751f73e93f33c82ece92bdaf68a46775b2428ad48d61c20238e06cf889d" gracePeriod=300 Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.516215 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a181c094-1cf9-42bd-b038-cc8a6f437aa3-operator-scripts\") pod \"neutron4147-account-delete-qzg4q\" (UID: \"a181c094-1cf9-42bd-b038-cc8a6f437aa3\") " pod="openstack/neutron4147-account-delete-qzg4q" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.516355 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/633c3722-e337-4b6a-98fe-451ac451dd06-operator-scripts\") pod \"cinder71a7-account-delete-wslds\" (UID: \"633c3722-e337-4b6a-98fe-451ac451dd06\") " pod="openstack/cinder71a7-account-delete-wslds" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.516376 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzphm\" (UniqueName: \"kubernetes.io/projected/a181c094-1cf9-42bd-b038-cc8a6f437aa3-kube-api-access-zzphm\") pod \"neutron4147-account-delete-qzg4q\" (UID: \"a181c094-1cf9-42bd-b038-cc8a6f437aa3\") " pod="openstack/neutron4147-account-delete-qzg4q" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.516397 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8tr6\" (UniqueName: \"kubernetes.io/projected/633c3722-e337-4b6a-98fe-451ac451dd06-kube-api-access-g8tr6\") pod \"cinder71a7-account-delete-wslds\" (UID: \"633c3722-e337-4b6a-98fe-451ac451dd06\") " pod="openstack/cinder71a7-account-delete-wslds" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.555909 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance5148-account-delete-fbhmq"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.557129 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance5148-account-delete-fbhmq" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.584312 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican8b4d-account-delete-dprdr"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.585507 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican8b4d-account-delete-dprdr" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.588157 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placementb0bd-account-delete-vxbgw" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.625148 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a181c094-1cf9-42bd-b038-cc8a6f437aa3-operator-scripts\") pod \"neutron4147-account-delete-qzg4q\" (UID: \"a181c094-1cf9-42bd-b038-cc8a6f437aa3\") " pod="openstack/neutron4147-account-delete-qzg4q" Nov 25 09:13:04 crc kubenswrapper[4932]: E1125 09:13:04.625424 4932 secret.go:188] Couldn't get secret openstack/barbican-config-data: secret "barbican-config-data" not found Nov 25 09:13:04 crc kubenswrapper[4932]: E1125 09:13:04.625482 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data podName:a83ee8ae-69d7-4ca5-ade1-9d2450880338 nodeName:}" failed. No retries permitted until 2025-11-25 09:13:05.125464979 +0000 UTC m=+1445.251494532 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data") pod "barbican-keystone-listener-76dfd47846-vpn45" (UID: "a83ee8ae-69d7-4ca5-ade1-9d2450880338") : secret "barbican-config-data" not found Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.625887 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a181c094-1cf9-42bd-b038-cc8a6f437aa3-operator-scripts\") pod \"neutron4147-account-delete-qzg4q\" (UID: \"a181c094-1cf9-42bd-b038-cc8a6f437aa3\") " pod="openstack/neutron4147-account-delete-qzg4q" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.629631 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/633c3722-e337-4b6a-98fe-451ac451dd06-operator-scripts\") pod \"cinder71a7-account-delete-wslds\" (UID: \"633c3722-e337-4b6a-98fe-451ac451dd06\") " pod="openstack/cinder71a7-account-delete-wslds" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.629689 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzphm\" (UniqueName: \"kubernetes.io/projected/a181c094-1cf9-42bd-b038-cc8a6f437aa3-kube-api-access-zzphm\") pod \"neutron4147-account-delete-qzg4q\" (UID: \"a181c094-1cf9-42bd-b038-cc8a6f437aa3\") " pod="openstack/neutron4147-account-delete-qzg4q" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.629721 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8tr6\" (UniqueName: \"kubernetes.io/projected/633c3722-e337-4b6a-98fe-451ac451dd06-kube-api-access-g8tr6\") pod \"cinder71a7-account-delete-wslds\" (UID: \"633c3722-e337-4b6a-98fe-451ac451dd06\") " pod="openstack/cinder71a7-account-delete-wslds" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.631127 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/633c3722-e337-4b6a-98fe-451ac451dd06-operator-scripts\") pod \"cinder71a7-account-delete-wslds\" (UID: \"633c3722-e337-4b6a-98fe-451ac451dd06\") " pod="openstack/cinder71a7-account-delete-wslds" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.665902 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8tr6\" (UniqueName: \"kubernetes.io/projected/633c3722-e337-4b6a-98fe-451ac451dd06-kube-api-access-g8tr6\") pod \"cinder71a7-account-delete-wslds\" (UID: \"633c3722-e337-4b6a-98fe-451ac451dd06\") " pod="openstack/cinder71a7-account-delete-wslds" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.682501 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4a545d2-ff3c-4a27-b210-4803cdbf3c86" path="/var/lib/kubelet/pods/d4a545d2-ff3c-4a27-b210-4803cdbf3c86/volumes" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.686027 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzphm\" (UniqueName: \"kubernetes.io/projected/a181c094-1cf9-42bd-b038-cc8a6f437aa3-kube-api-access-zzphm\") pod \"neutron4147-account-delete-qzg4q\" (UID: \"a181c094-1cf9-42bd-b038-cc8a6f437aa3\") " pod="openstack/neutron4147-account-delete-qzg4q" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.690786 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e12148ff-1b2e-4c34-85c0-ca43747a2eb4" 
path="/var/lib/kubelet/pods/e12148ff-1b2e-4c34-85c0-ca43747a2eb4/volumes" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.700036 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance5148-account-delete-fbhmq"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.700072 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.700096 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican8b4d-account-delete-dprdr"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.706447 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron4147-account-delete-qzg4q" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.731995 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kcmd\" (UniqueName: \"kubernetes.io/projected/36140bfd-540f-40b6-8521-a8a3d408dc9d-kube-api-access-5kcmd\") pod \"glance5148-account-delete-fbhmq\" (UID: \"36140bfd-540f-40b6-8521-a8a3d408dc9d\") " pod="openstack/glance5148-account-delete-fbhmq" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.732073 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts\") pod \"glance5148-account-delete-fbhmq\" (UID: \"36140bfd-540f-40b6-8521-a8a3d408dc9d\") " pod="openstack/glance5148-account-delete-fbhmq" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.732121 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts\") pod \"barbican8b4d-account-delete-dprdr\" (UID: \"dbc1ab9c-f494-4ce9-8758-d5c724e4413a\") " pod="openstack/barbican8b4d-account-delete-dprdr" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.732148 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvr7j\" (UniqueName: \"kubernetes.io/projected/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-kube-api-access-vvr7j\") pod \"barbican8b4d-account-delete-dprdr\" (UID: \"dbc1ab9c-f494-4ce9-8758-d5c724e4413a\") " pod="openstack/barbican8b4d-account-delete-dprdr" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.745726 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder71a7-account-delete-wslds" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.835062 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts\") pod \"glance5148-account-delete-fbhmq\" (UID: \"36140bfd-540f-40b6-8521-a8a3d408dc9d\") " pod="openstack/glance5148-account-delete-fbhmq" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.835419 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts\") pod \"barbican8b4d-account-delete-dprdr\" (UID: \"dbc1ab9c-f494-4ce9-8758-d5c724e4413a\") " pod="openstack/barbican8b4d-account-delete-dprdr" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.835451 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvr7j\" (UniqueName: \"kubernetes.io/projected/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-kube-api-access-vvr7j\") pod \"barbican8b4d-account-delete-dprdr\" (UID: \"dbc1ab9c-f494-4ce9-8758-d5c724e4413a\") " pod="openstack/barbican8b4d-account-delete-dprdr" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.835602 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kcmd\" (UniqueName: \"kubernetes.io/projected/36140bfd-540f-40b6-8521-a8a3d408dc9d-kube-api-access-5kcmd\") pod \"glance5148-account-delete-fbhmq\" (UID: \"36140bfd-540f-40b6-8521-a8a3d408dc9d\") " pod="openstack/glance5148-account-delete-fbhmq" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.837850 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts\") pod \"glance5148-account-delete-fbhmq\" (UID: \"36140bfd-540f-40b6-8521-a8a3d408dc9d\") " pod="openstack/glance5148-account-delete-fbhmq" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.839271 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts\") pod \"barbican8b4d-account-delete-dprdr\" (UID: \"dbc1ab9c-f494-4ce9-8758-d5c724e4413a\") " pod="openstack/barbican8b4d-account-delete-dprdr" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.846392 4932 generic.go:334] "Generic (PLEG): container finished" podID="c9d818a0-17fd-44a2-8855-a6f847efe274" containerID="385bb0f63503360fe1dd3b8bc517012f4e561bae2dc4d40f0fb11f4b6501c4c1" exitCode=2 Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.846458 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c9d818a0-17fd-44a2-8855-a6f847efe274","Type":"ContainerDied","Data":"385bb0f63503360fe1dd3b8bc517012f4e561bae2dc4d40f0fb11f4b6501c4c1"} Nov 25 09:13:04 crc kubenswrapper[4932]: E1125 09:13:04.849605 4932 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 25 09:13:04 crc kubenswrapper[4932]: E1125 09:13:04.849688 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data podName:969d317e-0787-44a8-8e27-554b0e887444 nodeName:}" failed. 
No retries permitted until 2025-11-25 09:13:05.349657563 +0000 UTC m=+1445.475687126 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data") pod "rabbitmq-server-0" (UID: "969d317e-0787-44a8-8e27-554b0e887444") : configmap "rabbitmq-config-data" not found Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.867325 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce/ovsdbserver-sb/0.log" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.867365 4932 generic.go:334] "Generic (PLEG): container finished" podID="f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" containerID="3aa7ad743c9d91e2340b3e3408429966ea1670cfd6e520674b81c2217ef12e5e" exitCode=2 Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.867381 4932 generic.go:334] "Generic (PLEG): container finished" podID="f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" containerID="cd839751f73e93f33c82ece92bdaf68a46775b2428ad48d61c20238e06cf889d" exitCode=143 Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.867412 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce","Type":"ContainerDied","Data":"3aa7ad743c9d91e2340b3e3408429966ea1670cfd6e520674b81c2217ef12e5e"} Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.867436 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce","Type":"ContainerDied","Data":"cd839751f73e93f33c82ece92bdaf68a46775b2428ad48d61c20238e06cf889d"} Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.873305 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-sqgkm"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.897321 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kcmd\" (UniqueName: \"kubernetes.io/projected/36140bfd-540f-40b6-8521-a8a3d408dc9d-kube-api-access-5kcmd\") pod \"glance5148-account-delete-fbhmq\" (UID: \"36140bfd-540f-40b6-8521-a8a3d408dc9d\") " pod="openstack/glance5148-account-delete-fbhmq" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.897614 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvr7j\" (UniqueName: \"kubernetes.io/projected/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-kube-api-access-vvr7j\") pod \"barbican8b4d-account-delete-dprdr\" (UID: \"dbc1ab9c-f494-4ce9-8758-d5c724e4413a\") " pod="openstack/barbican8b4d-account-delete-dprdr" Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.909385 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-sqgkm"] Nov 25 09:13:04 crc kubenswrapper[4932]: E1125 09:13:04.937144 4932 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 25 09:13:04 crc kubenswrapper[4932]: E1125 09:13:04.937224 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data podName:f41b25a4-f48e-4938-9c23-0d89751af6ae nodeName:}" failed. No retries permitted until 2025-11-25 09:13:05.937209073 +0000 UTC m=+1446.063238636 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data") pod "rabbitmq-cell1-server-0" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae") : configmap "rabbitmq-cell1-config-data" not found Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.946527 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-lc5vk"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.961873 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-lc5vk"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.970006 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-fbk5k"] Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.970253 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-fbk5k" podUID="c16a4087-2597-4662-880f-80a7a2a78ef2" containerName="openstack-network-exporter" containerID="cri-o://2e6c2d101453359a62990d88b2bfc484c902180a068c6131e7ba3d8b29699a33" gracePeriod=30 Nov 25 09:13:04 crc kubenswrapper[4932]: I1125 09:13:04.983726 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-c26qd"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.021917 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-drcqj"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.036993 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novaapi7e0a-account-delete-drmkw"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.038593 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance5148-account-delete-fbhmq" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.040087 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi7e0a-account-delete-drmkw" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.057909 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-p579r"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.077243 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-qbfss"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.077492 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" podUID="a9855d3c-818d-4804-add2-d6b0fce52613" containerName="dnsmasq-dns" containerID="cri-o://b8a9c7b4d8aee9148d3f80ec0b3f039c905ef40f6d4c9c1480a1255ba2197d40" gracePeriod=10 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.117563 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-p579r"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.132793 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi7e0a-account-delete-drmkw"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.133413 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican8b4d-account-delete-dprdr" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.143401 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts\") pod \"novaapi7e0a-account-delete-drmkw\" (UID: \"838bc013-33ba-4722-be1d-b88c9016c83a\") " pod="openstack/novaapi7e0a-account-delete-drmkw" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.143525 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hj78\" (UniqueName: \"kubernetes.io/projected/838bc013-33ba-4722-be1d-b88c9016c83a-kube-api-access-5hj78\") pod \"novaapi7e0a-account-delete-drmkw\" (UID: \"838bc013-33ba-4722-be1d-b88c9016c83a\") " pod="openstack/novaapi7e0a-account-delete-drmkw" Nov 25 09:13:05 crc kubenswrapper[4932]: E1125 09:13:05.143773 4932 secret.go:188] Couldn't get secret openstack/barbican-config-data: secret "barbican-config-data" not found Nov 25 09:13:05 crc kubenswrapper[4932]: E1125 09:13:05.143820 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data podName:a83ee8ae-69d7-4ca5-ade1-9d2450880338 nodeName:}" failed. No retries permitted until 2025-11-25 09:13:06.143801236 +0000 UTC m=+1446.269830799 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data") pod "barbican-keystone-listener-76dfd47846-vpn45" (UID: "a83ee8ae-69d7-4ca5-ade1-9d2450880338") : secret "barbican-config-data" not found Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.166719 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell0a937-account-delete-czmhb"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.170882 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell0a937-account-delete-czmhb" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.189230 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0a937-account-delete-czmhb"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.202345 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5cfb6b64bb-8mrcr"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.202693 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5cfb6b64bb-8mrcr" podUID="7a1917d6-4455-4cf5-b932-a38584663b02" containerName="placement-log" containerID="cri-o://a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.202856 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5cfb6b64bb-8mrcr" podUID="7a1917d6-4455-4cf5-b932-a38584663b02" containerName="placement-api" containerID="cri-o://37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.221118 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-crk46"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.245313 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndhwk\" (UniqueName: \"kubernetes.io/projected/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-kube-api-access-ndhwk\") pod \"novacell0a937-account-delete-czmhb\" (UID: \"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71\") " pod="openstack/novacell0a937-account-delete-czmhb" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.245414 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hj78\" (UniqueName: \"kubernetes.io/projected/838bc013-33ba-4722-be1d-b88c9016c83a-kube-api-access-5hj78\") pod \"novaapi7e0a-account-delete-drmkw\" (UID: \"838bc013-33ba-4722-be1d-b88c9016c83a\") " pod="openstack/novaapi7e0a-account-delete-drmkw" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.245521 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts\") pod \"novacell0a937-account-delete-czmhb\" (UID: \"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71\") " pod="openstack/novacell0a937-account-delete-czmhb" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.245539 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts\") pod \"novaapi7e0a-account-delete-drmkw\" (UID: \"838bc013-33ba-4722-be1d-b88c9016c83a\") " pod="openstack/novaapi7e0a-account-delete-drmkw" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.247091 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts\") pod \"novaapi7e0a-account-delete-drmkw\" (UID: \"838bc013-33ba-4722-be1d-b88c9016c83a\") " pod="openstack/novaapi7e0a-account-delete-drmkw" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.272813 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hj78\" (UniqueName: 
\"kubernetes.io/projected/838bc013-33ba-4722-be1d-b88c9016c83a-kube-api-access-5hj78\") pod \"novaapi7e0a-account-delete-drmkw\" (UID: \"838bc013-33ba-4722-be1d-b88c9016c83a\") " pod="openstack/novaapi7e0a-account-delete-drmkw" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.295602 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-crk46"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.312696 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5584db9bdf-rzbj9"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.312981 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5584db9bdf-rzbj9" podUID="d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" containerName="neutron-api" containerID="cri-o://ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.317623 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5584db9bdf-rzbj9" podUID="d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" containerName="neutron-httpd" containerID="cri-o://674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.347021 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts\") pod \"novacell0a937-account-delete-czmhb\" (UID: \"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71\") " pod="openstack/novacell0a937-account-delete-czmhb" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.347105 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndhwk\" (UniqueName: \"kubernetes.io/projected/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-kube-api-access-ndhwk\") pod \"novacell0a937-account-delete-czmhb\" (UID: \"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71\") " pod="openstack/novacell0a937-account-delete-czmhb" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.356655 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts\") pod \"novacell0a937-account-delete-czmhb\" (UID: \"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71\") " pod="openstack/novacell0a937-account-delete-czmhb" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.362778 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.362992 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" containerName="cinder-scheduler" containerID="cri-o://281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.363366 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" containerName="probe" containerID="cri-o://8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385006 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndhwk\" (UniqueName: 
\"kubernetes.io/projected/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-kube-api-access-ndhwk\") pod \"novacell0a937-account-delete-czmhb\" (UID: \"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71\") " pod="openstack/novacell0a937-account-delete-czmhb" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385080 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385591 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-server" containerID="cri-o://00edaf8b62c16ee50bbf819b1838d3ce3fd0a27605f5823b0347afa99c531c70" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385700 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-updater" containerID="cri-o://5ae59b9454a13af8d9b95946eddb65b72cf6eb58ca8f5a5c793ecae3ee358a2f" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385731 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-auditor" containerID="cri-o://8418a97dfcdcedf4b5696213eb9548d1ede0f2e23cfc955f8dc8202263735b8a" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385773 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-replicator" containerID="cri-o://568484e69b9c4127e15f009bf0e5694d15ca4b6ae5a35b7503f084c9adb3e9a3" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385812 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-server" containerID="cri-o://d77ea0e7a1509cc988fded84ce9cd4dc66e884a9b6f07ad09301588d2897762e" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385840 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="swift-recon-cron" containerID="cri-o://a9a48f9fe27c63900394a2e67fd1df3228736d3ba3410cb4defb99fc16d721f1" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385868 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="rsync" containerID="cri-o://78edf79de3cfd571e1fec0bd599680cc34a039f29b8fc703497738f0cf348ad8" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385897 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-expirer" containerID="cri-o://07e4106840372eb90e9a0d57a59631587d0bde7ac43d138cc5d5ec8a10885a84" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385926 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-updater" containerID="cri-o://06eca31abe1c59ab7a8cb701e4a49245773b3f648c29cdbe3b219489466d8705" gracePeriod=30 Nov 25 09:13:05 crc 
kubenswrapper[4932]: I1125 09:13:05.385951 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-auditor" containerID="cri-o://5a51256b52321de55e8f314f90f9c0ba2bb1175fd2f75c69c2bef2451a36ec18" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385965 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-reaper" containerID="cri-o://a45a7147f788b51504a339167827aa53fdb2e4a2d35f004cd41d1718e61f00a0" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.385982 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-auditor" containerID="cri-o://8d2c29a0a166c6ebbf9113b41e4e2ba9e248c36be93456a40622f2d5fcc2066e" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.386009 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-replicator" containerID="cri-o://a5d5ffb7d109b7b5eac7b58236b7451c24c1c17dca222fe9b3d425e19a748cf5" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.386019 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-replicator" containerID="cri-o://0a8e07a6bdc220d1412d97c2b357bdc14500c61e6d93f00a0915d134c315a151" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.386054 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-server" containerID="cri-o://0e9f1ea09136d57750420bc0ce46abbfd67cd0b1239ce71468be11a57e791720" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.405272 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-dnps4"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.416810 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi7e0a-account-delete-drmkw" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.429457 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-dnps4"] Nov 25 09:13:05 crc kubenswrapper[4932]: E1125 09:13:05.460622 4932 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 25 09:13:05 crc kubenswrapper[4932]: E1125 09:13:05.460707 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data podName:969d317e-0787-44a8-8e27-554b0e887444 nodeName:}" failed. No retries permitted until 2025-11-25 09:13:06.460689208 +0000 UTC m=+1446.586718771 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data") pod "rabbitmq-server-0" (UID: "969d317e-0787-44a8-8e27-554b0e887444") : configmap "rabbitmq-config-data" not found Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.476774 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell0a937-account-delete-czmhb" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.495858 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-bm59q"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.504096 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-bm59q"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.513997 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placementb0bd-account-delete-vxbgw"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.535556 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.536163 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="5373bec8-828a-4e9b-b0fd-6a0ef84375de" containerName="openstack-network-exporter" containerID="cri-o://4ec3744d9a3e32c5d252c31f008b49edb1884c6bb290d5af4a837ca5bbb374f8" gracePeriod=300 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.561643 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.561853 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" containerName="glance-log" containerID="cri-o://84c8cfeb381f864d67634b78621a1b7460c7087ecdf9baf7bdd83200605e31e2" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.562342 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" containerName="glance-httpd" containerID="cri-o://b8682f71f2ee6925b54df3f64b25f4f743542faa8879099318a3b2e0226e6888" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.581251 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.581506 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c7865402-5a21-44f9-9436-d5d1bab67a07" containerName="cinder-api-log" containerID="cri-o://c8ae9ee3aa8405ff65c9452ed08700eda42757ab1407937bb5f3003fe4cf7a9e" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.581894 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="c7865402-5a21-44f9-9436-d5d1bab67a07" containerName="cinder-api" containerID="cri-o://80df99d51a793387f4befd153965af902fa51eff5beea4589846bd522aef8f83" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: E1125 09:13:05.596397 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2cf6819b94d62fccf47fe857c4bedbcb3672a422e4bfda3c5103104951af3ed6" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.600553 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.601461 4932 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/glance-default-internal-api-0" podUID="c5101ae2-5106-48c7-9116-4c0e5ededb84" containerName="glance-httpd" containerID="cri-o://72f71cf73b9865b04d4d3de5c8547c8ca66dceb1900d89f1ff42c5d833013afd" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.601668 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c5101ae2-5106-48c7-9116-4c0e5ededb84" containerName="glance-log" containerID="cri-o://3d4d2ece1e5eef9d1d0e16758fced0df7cde0583ea5a26d7bbd9fa814e5ca952" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: E1125 09:13:05.611923 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2cf6819b94d62fccf47fe857c4bedbcb3672a422e4bfda3c5103104951af3ed6" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.634136 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.652665 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.652857 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerName="nova-metadata-log" containerID="cri-o://ef08398ee58bcf8e60c93b50283ca6afdcbca3b7a33b1eac1c91a89fe5b90230" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.653016 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerName="nova-metadata-metadata" containerID="cri-o://7d27611ad3f8e0e548937326ec5872d5fd17ef030c916731538091ee33f8c092" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: E1125 09:13:05.665414 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2cf6819b94d62fccf47fe857c4bedbcb3672a422e4bfda3c5103104951af3ed6" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 25 09:13:05 crc kubenswrapper[4932]: E1125 09:13:05.665492 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" containerName="ovn-northd" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.742532 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.777306 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.777559 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="31823923-9ce9-49e0-b4c1-42418d49918c" containerName="nova-api-log" containerID="cri-o://4208013c150414e2c4e6a9db4af0a0ed4445d68f363fa9be56e13050961d4b79" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.777708 4932 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/nova-api-0" podUID="31823923-9ce9-49e0-b4c1-42418d49918c" containerName="nova-api-api" containerID="cri-o://7247d6a20300098ab3cb5a4ccdeaecb8b01f9585ec29af77a8b23a178fb313d8" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.782464 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.782504 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.795577 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-q67zl"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.810321 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-q67zl"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.820381 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-bc42-account-create-ppnl9"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.833441 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-bc42-account-create-ppnl9"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.907794 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-76dfd47846-vpn45"] Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.908091 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" podUID="a83ee8ae-69d7-4ca5-ade1-9d2450880338" containerName="barbican-keystone-listener-log" containerID="cri-o://905fc878d7a680a212ca79f470646dd7111019ec6d24cae51d0d6adfba1d2500" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.909267 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" podUID="a83ee8ae-69d7-4ca5-ade1-9d2450880338" containerName="barbican-keystone-listener" containerID="cri-o://be76288d747fd77398730e153b2bfa8b05e410e8971bd296d8c9d0bb4df3ac3b" gracePeriod=30 Nov 25 09:13:05 crc kubenswrapper[4932]: I1125 09:13:05.970017 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6c674848fb-kcq2h"] Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.003402 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6c674848fb-kcq2h" podUID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerName="barbican-api-log" containerID="cri-o://a88145623badf6b75935e3c66fd27e243c456b8c75fa4980f833fcbd15313f78" gracePeriod=30 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.004451 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-6c674848fb-kcq2h" podUID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerName="barbican-api" containerID="cri-o://bcb0b33d20667e08d805c88572654c89aed61e0f969c78fc5ef9ec57be99532f" gracePeriod=30 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.004689 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="f41b25a4-f48e-4938-9c23-0d89751af6ae" containerName="rabbitmq" containerID="cri-o://f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2" gracePeriod=604800 Nov 25 09:13:06 crc kubenswrapper[4932]: E1125 09:13:06.006137 4932 configmap.go:193] Couldn't get 
configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 25 09:13:06 crc kubenswrapper[4932]: E1125 09:13:06.006209 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data podName:f41b25a4-f48e-4938-9c23-0d89751af6ae nodeName:}" failed. No retries permitted until 2025-11-25 09:13:08.006175622 +0000 UTC m=+1448.132205185 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data") pod "rabbitmq-cell1-server-0" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae") : configmap "rabbitmq-cell1-config-data" not found Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.029529 4932 generic.go:334] "Generic (PLEG): container finished" podID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" containerID="84c8cfeb381f864d67634b78621a1b7460c7087ecdf9baf7bdd83200605e31e2" exitCode=143 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.029610 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61","Type":"ContainerDied","Data":"84c8cfeb381f864d67634b78621a1b7460c7087ecdf9baf7bdd83200605e31e2"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.035874 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-6d444df75c-9wqvx"] Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.036144 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-6d444df75c-9wqvx" podUID="d1c39090-1743-40c3-95d5-71f5ca126c96" containerName="barbican-worker-log" containerID="cri-o://319658ac79c4fb4fcd46ed313645d6769272569f11fd5e0e78f4b23b5fcf4935" gracePeriod=30 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.037175 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-6d444df75c-9wqvx" podUID="d1c39090-1743-40c3-95d5-71f5ca126c96" containerName="barbican-worker" containerID="cri-o://abe66e4f341b24534642787b92c4263f4ebf66e15aa3b9d673ff051b62fba4b5" gracePeriod=30 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.097587 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.097831 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="15f7fd9d-7a12-4f06-9f9e-d9e4d059039f" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47" gracePeriod=30 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.115298 4932 generic.go:334] "Generic (PLEG): container finished" podID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerID="ef08398ee58bcf8e60c93b50283ca6afdcbca3b7a33b1eac1c91a89fe5b90230" exitCode=143 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.115415 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"90c30cef-5376-4f4a-8d59-9ab6daff902d","Type":"ContainerDied","Data":"ef08398ee58bcf8e60c93b50283ca6afdcbca3b7a33b1eac1c91a89fe5b90230"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.133473 4932 generic.go:334] "Generic (PLEG): container finished" podID="a9855d3c-818d-4804-add2-d6b0fce52613" 
containerID="b8a9c7b4d8aee9148d3f80ec0b3f039c905ef40f6d4c9c1480a1255ba2197d40" exitCode=0 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.133534 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" event={"ID":"a9855d3c-818d-4804-add2-d6b0fce52613","Type":"ContainerDied","Data":"b8a9c7b4d8aee9148d3f80ec0b3f039c905ef40f6d4c9c1480a1255ba2197d40"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.195269 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.215475 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.215691 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="d586a3b8-c6b8-4c6e-aa6f-11797966d218" containerName="nova-cell1-conductor-conductor" containerID="cri-o://9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863" gracePeriod=30 Nov 25 09:13:06 crc kubenswrapper[4932]: E1125 09:13:06.218077 4932 secret.go:188] Couldn't get secret openstack/barbican-config-data: secret "barbican-config-data" not found Nov 25 09:13:06 crc kubenswrapper[4932]: E1125 09:13:06.218123 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data podName:a83ee8ae-69d7-4ca5-ade1-9d2450880338 nodeName:}" failed. No retries permitted until 2025-11-25 09:13:08.21810738 +0000 UTC m=+1448.344136943 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data") pod "barbican-keystone-listener-76dfd47846-vpn45" (UID: "a83ee8ae-69d7-4ca5-ade1-9d2450880338") : secret "barbican-config-data" not found Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.243322 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-cell1-novncproxy-0" podUID="15f7fd9d-7a12-4f06-9f9e-d9e4d059039f" containerName="nova-cell1-novncproxy-novncproxy" probeResult="failure" output="Get \"https://10.217.0.201:6080/vnc_lite.html\": dial tcp 10.217.0.201:6080: connect: connection refused" Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.250737 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-ln6w4"] Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.257513 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6ktnm"] Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.285470 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-6ktnm"] Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.297308 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-ln6w4"] Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.321467 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-5584db9bdf-rzbj9" podUID="d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.177:9696/\": dial tcp 10.217.0.177:9696: connect: connection refused" Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.337642 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 
09:13:06.337846 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="b57cfb59-e562-4fb2-bfad-b4cf5382c45a" containerName="nova-cell0-conductor-conductor" containerID="cri-o://75e6779617423b881b03c62dbe9856298c32198e80962d252b1ab4afc7067b5d" gracePeriod=30 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352766 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="5ae59b9454a13af8d9b95946eddb65b72cf6eb58ca8f5a5c793ecae3ee358a2f" exitCode=0 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352792 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="8418a97dfcdcedf4b5696213eb9548d1ede0f2e23cfc955f8dc8202263735b8a" exitCode=0 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352799 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="568484e69b9c4127e15f009bf0e5694d15ca4b6ae5a35b7503f084c9adb3e9a3" exitCode=0 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352805 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="07e4106840372eb90e9a0d57a59631587d0bde7ac43d138cc5d5ec8a10885a84" exitCode=0 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352811 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="06eca31abe1c59ab7a8cb701e4a49245773b3f648c29cdbe3b219489466d8705" exitCode=0 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352816 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="5a51256b52321de55e8f314f90f9c0ba2bb1175fd2f75c69c2bef2451a36ec18" exitCode=0 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352822 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="0a8e07a6bdc220d1412d97c2b357bdc14500c61e6d93f00a0915d134c315a151" exitCode=0 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352828 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="a45a7147f788b51504a339167827aa53fdb2e4a2d35f004cd41d1718e61f00a0" exitCode=0 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352834 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="8d2c29a0a166c6ebbf9113b41e4e2ba9e248c36be93456a40622f2d5fcc2066e" exitCode=0 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352840 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="a5d5ffb7d109b7b5eac7b58236b7451c24c1c17dca222fe9b3d425e19a748cf5" exitCode=0 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352915 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"5ae59b9454a13af8d9b95946eddb65b72cf6eb58ca8f5a5c793ecae3ee358a2f"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352946 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"8418a97dfcdcedf4b5696213eb9548d1ede0f2e23cfc955f8dc8202263735b8a"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352957 4932 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"568484e69b9c4127e15f009bf0e5694d15ca4b6ae5a35b7503f084c9adb3e9a3"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352967 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"07e4106840372eb90e9a0d57a59631587d0bde7ac43d138cc5d5ec8a10885a84"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352977 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"06eca31abe1c59ab7a8cb701e4a49245773b3f648c29cdbe3b219489466d8705"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.352988 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"5a51256b52321de55e8f314f90f9c0ba2bb1175fd2f75c69c2bef2451a36ec18"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.353001 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"0a8e07a6bdc220d1412d97c2b357bdc14500c61e6d93f00a0915d134c315a151"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.353011 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"a45a7147f788b51504a339167827aa53fdb2e4a2d35f004cd41d1718e61f00a0"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.353023 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"8d2c29a0a166c6ebbf9113b41e4e2ba9e248c36be93456a40622f2d5fcc2066e"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.353032 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"a5d5ffb7d109b7b5eac7b58236b7451c24c1c17dca222fe9b3d425e19a748cf5"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.367811 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="f41b25a4-f48e-4938-9c23-0d89751af6ae" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.369577 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-fbk5k_c16a4087-2597-4662-880f-80a7a2a78ef2/openstack-network-exporter/0.log" Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.369612 4932 generic.go:334] "Generic (PLEG): container finished" podID="c16a4087-2597-4662-880f-80a7a2a78ef2" containerID="2e6c2d101453359a62990d88b2bfc484c902180a068c6131e7ba3d8b29699a33" exitCode=2 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.369670 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-fbk5k" event={"ID":"c16a4087-2597-4662-880f-80a7a2a78ef2","Type":"ContainerDied","Data":"2e6c2d101453359a62990d88b2bfc484c902180a068c6131e7ba3d8b29699a33"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.383459 4932 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/placementb0bd-account-delete-vxbgw" event={"ID":"ce711acf-071a-4387-8c42-e2f3f8c25df9","Type":"ContainerStarted","Data":"bfedcc77c3faf7eba937e63b985603e26c2ec437945d79e3c486c0e00aff0af3"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.424735 4932 generic.go:334] "Generic (PLEG): container finished" podID="d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" containerID="674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47" exitCode=0 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.424816 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5584db9bdf-rzbj9" event={"ID":"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5","Type":"ContainerDied","Data":"674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.441150 4932 generic.go:334] "Generic (PLEG): container finished" podID="5373bec8-828a-4e9b-b0fd-6a0ef84375de" containerID="4ec3744d9a3e32c5d252c31f008b49edb1884c6bb290d5af4a837ca5bbb374f8" exitCode=2 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.441220 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5373bec8-828a-4e9b-b0fd-6a0ef84375de","Type":"ContainerDied","Data":"4ec3744d9a3e32c5d252c31f008b49edb1884c6bb290d5af4a837ca5bbb374f8"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.443638 4932 generic.go:334] "Generic (PLEG): container finished" podID="c7865402-5a21-44f9-9436-d5d1bab67a07" containerID="c8ae9ee3aa8405ff65c9452ed08700eda42757ab1407937bb5f3003fe4cf7a9e" exitCode=143 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.443678 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c7865402-5a21-44f9-9436-d5d1bab67a07","Type":"ContainerDied","Data":"c8ae9ee3aa8405ff65c9452ed08700eda42757ab1407937bb5f3003fe4cf7a9e"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.445139 4932 generic.go:334] "Generic (PLEG): container finished" podID="c5101ae2-5106-48c7-9116-4c0e5ededb84" containerID="3d4d2ece1e5eef9d1d0e16758fced0df7cde0583ea5a26d7bbd9fa814e5ca952" exitCode=143 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.445173 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c5101ae2-5106-48c7-9116-4c0e5ededb84","Type":"ContainerDied","Data":"3d4d2ece1e5eef9d1d0e16758fced0df7cde0583ea5a26d7bbd9fa814e5ca952"} Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.446952 4932 generic.go:334] "Generic (PLEG): container finished" podID="7a1917d6-4455-4cf5-b932-a38584663b02" containerID="a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067" exitCode=143 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.446972 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5cfb6b64bb-8mrcr" event={"ID":"7a1917d6-4455-4cf5-b932-a38584663b02","Type":"ContainerDied","Data":"a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067"} Nov 25 09:13:06 crc kubenswrapper[4932]: E1125 09:13:06.533946 4932 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 25 09:13:06 crc kubenswrapper[4932]: E1125 09:13:06.534057 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data podName:969d317e-0787-44a8-8e27-554b0e887444 nodeName:}" failed. 
Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.608714 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovs-vswitchd" containerID="cri-o://18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" gracePeriod=29
Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.658097 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f0aa1af-46c3-4583-9140-149dddf9b048" path="/var/lib/kubelet/pods/0f0aa1af-46c3-4583-9140-149dddf9b048/volumes"
Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.663313 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27edebe8-2def-4a76-8f3d-0039ae29f4c8" path="/var/lib/kubelet/pods/27edebe8-2def-4a76-8f3d-0039ae29f4c8/volumes"
Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.678809 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046" path="/var/lib/kubelet/pods/6f2c7397-d6cd-48a4-8dd5-30b2d4cc9046/volumes"
Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.679417 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92f23c40-bf12-4901-8f08-5d306bab0cef" path="/var/lib/kubelet/pods/92f23c40-bf12-4901-8f08-5d306bab0cef/volumes"
Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.679947 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a24ef146-76b6-4034-afce-fa2e2c94e641" path="/var/lib/kubelet/pods/a24ef146-76b6-4034-afce-fa2e2c94e641/volumes"
Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.681626 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovs-vswitchd" probeResult="failure" output=<
Nov 25 09:13:06 crc kubenswrapper[4932]: cat: /var/run/openvswitch/ovs-vswitchd.pid: No such file or directory
Nov 25 09:13:06 crc kubenswrapper[4932]: ERROR - Failed to get pid for ovs-vswitchd, exit status: 0
Nov 25 09:13:06 crc kubenswrapper[4932]: >
Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.692983 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9fc814a-d54a-4157-9257-db33b7734522" path="/var/lib/kubelet/pods/b9fc814a-d54a-4157-9257-db33b7734522/volumes"
Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.693532 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbf19371-546d-4971-a555-443c36b129be" path="/var/lib/kubelet/pods/cbf19371-546d-4971-a555-443c36b129be/volumes"
Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.694075 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0fb7667-bb72-4856-9492-7c0f783f3a7f" path="/var/lib/kubelet/pods/e0fb7667-bb72-4856-9492-7c0f783f3a7f/volumes"
Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.695348 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="969d317e-0787-44a8-8e27-554b0e887444" containerName="rabbitmq" containerID="cri-o://86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a" gracePeriod=604800
containerID="cri-o://86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a" gracePeriod=604800 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.709280 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e14c1b6a-a83b-47fc-8fac-36468c1b4df5" path="/var/lib/kubelet/pods/e14c1b6a-a83b-47fc-8fac-36468c1b4df5/volumes" Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.710374 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee8b5a64-5144-4fd9-a7b0-b12d318ababa" path="/var/lib/kubelet/pods/ee8b5a64-5144-4fd9-a7b0-b12d318ababa/volumes" Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.756231 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="5373bec8-828a-4e9b-b0fd-6a0ef84375de" containerName="ovsdbserver-nb" containerID="cri-o://1b7fb306f61206aff751cf1adbf835164dd03eeceaa44f76421e2b0575c75592" gracePeriod=299 Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.757659 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.757821 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="186ced68-a489-410c-afa6-d4d623c37fc1" containerName="nova-scheduler-scheduler" containerID="cri-o://c2a6c3a1e0b539444b03c2c2b147c48f0e4e50e3895eb1146918d21fcc6cd271" gracePeriod=30 Nov 25 09:13:06 crc kubenswrapper[4932]: E1125 09:13:06.887757 4932 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 25 09:13:06 crc kubenswrapper[4932]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 25 09:13:06 crc kubenswrapper[4932]: + source /usr/local/bin/container-scripts/functions Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNBridge=br-int Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNRemote=tcp:localhost:6642 Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNEncapType=geneve Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNAvailabilityZones= Nov 25 09:13:06 crc kubenswrapper[4932]: ++ EnableChassisAsGateway=true Nov 25 09:13:06 crc kubenswrapper[4932]: ++ PhysicalNetworks= Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNHostName= Nov 25 09:13:06 crc kubenswrapper[4932]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 25 09:13:06 crc kubenswrapper[4932]: ++ ovs_dir=/var/lib/openvswitch Nov 25 09:13:06 crc kubenswrapper[4932]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 25 09:13:06 crc kubenswrapper[4932]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 25 09:13:06 crc kubenswrapper[4932]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 25 09:13:06 crc kubenswrapper[4932]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 25 09:13:06 crc kubenswrapper[4932]: + sleep 0.5 Nov 25 09:13:06 crc kubenswrapper[4932]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 25 09:13:06 crc kubenswrapper[4932]: + sleep 0.5 Nov 25 09:13:06 crc kubenswrapper[4932]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 25 09:13:06 crc kubenswrapper[4932]: + sleep 0.5 Nov 25 09:13:06 crc kubenswrapper[4932]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 25 09:13:06 crc kubenswrapper[4932]: + cleanup_ovsdb_server_semaphore Nov 25 09:13:06 crc kubenswrapper[4932]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 25 09:13:06 crc kubenswrapper[4932]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 25 09:13:06 crc kubenswrapper[4932]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-drcqj" message=< Nov 25 09:13:06 crc kubenswrapper[4932]: Exiting ovsdb-server (5) [ OK ] Nov 25 09:13:06 crc kubenswrapper[4932]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 25 09:13:06 crc kubenswrapper[4932]: + source /usr/local/bin/container-scripts/functions Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNBridge=br-int Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNRemote=tcp:localhost:6642 Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNEncapType=geneve Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNAvailabilityZones= Nov 25 09:13:06 crc kubenswrapper[4932]: ++ EnableChassisAsGateway=true Nov 25 09:13:06 crc kubenswrapper[4932]: ++ PhysicalNetworks= Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNHostName= Nov 25 09:13:06 crc kubenswrapper[4932]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 25 09:13:06 crc kubenswrapper[4932]: ++ ovs_dir=/var/lib/openvswitch Nov 25 09:13:06 crc kubenswrapper[4932]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 25 09:13:06 crc kubenswrapper[4932]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 25 09:13:06 crc kubenswrapper[4932]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 25 09:13:06 crc kubenswrapper[4932]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 25 09:13:06 crc kubenswrapper[4932]: + sleep 0.5 Nov 25 09:13:06 crc kubenswrapper[4932]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 25 09:13:06 crc kubenswrapper[4932]: + sleep 0.5 Nov 25 09:13:06 crc kubenswrapper[4932]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 25 09:13:06 crc kubenswrapper[4932]: + sleep 0.5 Nov 25 09:13:06 crc kubenswrapper[4932]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 25 09:13:06 crc kubenswrapper[4932]: + cleanup_ovsdb_server_semaphore Nov 25 09:13:06 crc kubenswrapper[4932]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 25 09:13:06 crc kubenswrapper[4932]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 25 09:13:06 crc kubenswrapper[4932]: > Nov 25 09:13:06 crc kubenswrapper[4932]: E1125 09:13:06.887995 4932 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 25 09:13:06 crc kubenswrapper[4932]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 25 09:13:06 crc kubenswrapper[4932]: + source /usr/local/bin/container-scripts/functions Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNBridge=br-int Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNRemote=tcp:localhost:6642 Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNEncapType=geneve Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNAvailabilityZones= Nov 25 09:13:06 crc kubenswrapper[4932]: ++ EnableChassisAsGateway=true Nov 25 09:13:06 crc kubenswrapper[4932]: ++ PhysicalNetworks= Nov 25 09:13:06 crc kubenswrapper[4932]: ++ OVNHostName= Nov 25 09:13:06 crc kubenswrapper[4932]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 25 09:13:06 crc kubenswrapper[4932]: ++ ovs_dir=/var/lib/openvswitch Nov 25 09:13:06 crc kubenswrapper[4932]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 25 09:13:06 crc kubenswrapper[4932]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 25 09:13:06 crc kubenswrapper[4932]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 25 09:13:06 crc kubenswrapper[4932]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 25 09:13:06 crc kubenswrapper[4932]: + sleep 0.5 Nov 25 09:13:06 crc kubenswrapper[4932]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 25 09:13:06 crc kubenswrapper[4932]: + sleep 0.5 Nov 25 09:13:06 crc kubenswrapper[4932]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 25 09:13:06 crc kubenswrapper[4932]: + sleep 0.5 Nov 25 09:13:06 crc kubenswrapper[4932]: + '[' '!' 
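[annotation] The xtrace above is the ovsdb-server container's PreStop hook, stop-ovsdb-server.sh. Reconstructed from the '+'/'++' trace lines, it amounts to the file-semaphore wait sketched below; the paths and commands are taken verbatim from the trace, while the loop structure and function body are inferred, not the real source (in the real container the variables come from sourcing /usr/local/bin/container-scripts/functions):

  #!/bin/bash
  # Sketch of the traced PreStop hook (inferred, not the shipped script).
  SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server

  cleanup_ovsdb_server_semaphore() {
      rm -f "$SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE"
  }

  # Poll every 0.5s until ovn-controller drops the "safe to stop" file,
  # then remove it and stop only ovsdb-server (vswitchd is stopped separately).
  while [ ! -f "$SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE" ]; do
      sleep 0.5
  done
  cleanup_ovsdb_server_semaphore
  /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd

The "exited with 137" in the log is 128+9, i.e. the hook process ended on SIGKILL, which the kubelet reports as a PreStop failure even though the trace shows ovs-ctl did run ("Exiting ovsdb-server (5) [ OK ]").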
Nov 25 09:13:06 crc kubenswrapper[4932]: I1125 09:13:06.888030 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovsdb-server" containerID="cri-o://120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" gracePeriod=29
Nov 25 09:13:07 crc kubenswrapper[4932]: E1125 09:13:07.057153 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 25 09:13:07 crc kubenswrapper[4932]: E1125 09:13:07.058552 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 25 09:13:07 crc kubenswrapper[4932]: E1125 09:13:07.064891 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"]
Nov 25 09:13:07 crc kubenswrapper[4932]: E1125 09:13:07.064952 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="d586a3b8-c6b8-4c6e-aa6f-11797966d218" containerName="nova-cell1-conductor-conductor"
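[annotation] The failing exec probe above is the conductor's readiness check, recorded verbatim as a single pgrep invocation. Assuming procps-ng semantics for -r/--runstates, it passes only while a matching process exists in one of the listed kernel run states:

  # Readiness probe recorded above (cmd copied verbatim from the log).
  # Exits 0 only if a nova-conductor process is in state D, R, S or T;
  # here it cannot even start because the container is stopping.
  /usr/bin/pgrep -r DRST nova-conductor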
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.197624 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dj7zg\" (UniqueName: \"kubernetes.io/projected/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-kube-api-access-dj7zg\") pod \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.197680 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-ovsdbserver-sb-tls-certs\") pod \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.197716 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-metrics-certs-tls-certs\") pod \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.197760 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-config\") pod \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.197778 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-scripts\") pod \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.199822 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-combined-ca-bundle\") pod \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.199846 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-config" (OuterVolumeSpecName: "config") pod "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" (UID: "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.199909 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-ovsdb-rundir\") pod \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.199935 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\" (UID: \"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.200226 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-scripts" (OuterVolumeSpecName: "scripts") pod "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" (UID: "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.200645 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.200656 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.203012 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" (UID: "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.211414 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" (UID: "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.211476 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-kube-api-access-dj7zg" (OuterVolumeSpecName: "kube-api-access-dj7zg") pod "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" (UID: "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce"). InnerVolumeSpecName "kube-api-access-dj7zg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.293012 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-fbk5k_c16a4087-2597-4662-880f-80a7a2a78ef2/openstack-network-exporter/0.log" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.293087 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.301751 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/c16a4087-2597-4662-880f-80a7a2a78ef2-ovn-rundir\") pod \"c16a4087-2597-4662-880f-80a7a2a78ef2\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.301841 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c16a4087-2597-4662-880f-80a7a2a78ef2-combined-ca-bundle\") pod \"c16a4087-2597-4662-880f-80a7a2a78ef2\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.302021 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c16a4087-2597-4662-880f-80a7a2a78ef2-config\") pod \"c16a4087-2597-4662-880f-80a7a2a78ef2\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.302048 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/c16a4087-2597-4662-880f-80a7a2a78ef2-ovs-rundir\") pod \"c16a4087-2597-4662-880f-80a7a2a78ef2\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.302093 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vq8rm\" (UniqueName: \"kubernetes.io/projected/c16a4087-2597-4662-880f-80a7a2a78ef2-kube-api-access-vq8rm\") pod \"c16a4087-2597-4662-880f-80a7a2a78ef2\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.302123 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16a4087-2597-4662-880f-80a7a2a78ef2-metrics-certs-tls-certs\") pod \"c16a4087-2597-4662-880f-80a7a2a78ef2\" (UID: \"c16a4087-2597-4662-880f-80a7a2a78ef2\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.302529 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.302555 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.302564 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dj7zg\" (UniqueName: \"kubernetes.io/projected/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-kube-api-access-dj7zg\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.304068 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c16a4087-2597-4662-880f-80a7a2a78ef2-config" (OuterVolumeSpecName: "config") pod "c16a4087-2597-4662-880f-80a7a2a78ef2" (UID: "c16a4087-2597-4662-880f-80a7a2a78ef2"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.304107 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c16a4087-2597-4662-880f-80a7a2a78ef2-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "c16a4087-2597-4662-880f-80a7a2a78ef2" (UID: "c16a4087-2597-4662-880f-80a7a2a78ef2"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.304348 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c16a4087-2597-4662-880f-80a7a2a78ef2-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "c16a4087-2597-4662-880f-80a7a2a78ef2" (UID: "c16a4087-2597-4662-880f-80a7a2a78ef2"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.311015 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" (UID: "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.316130 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c16a4087-2597-4662-880f-80a7a2a78ef2-kube-api-access-vq8rm" (OuterVolumeSpecName: "kube-api-access-vq8rm") pod "c16a4087-2597-4662-880f-80a7a2a78ef2" (UID: "c16a4087-2597-4662-880f-80a7a2a78ef2"). InnerVolumeSpecName "kube-api-access-vq8rm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: E1125 09:13:07.392935 4932 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28020cd8_f0a6_4aa9_80e6_4aa92b554850.slice/crio-96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod257c86ab_2577_4d46_bdb3_1ec56da0d21e.slice/crio-conmon-120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1c39090_1743_40c3_95d5_71f5ca126c96.slice/crio-319658ac79c4fb4fcd46ed313645d6769272569f11fd5e0e78f4b23b5fcf4935.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81ccee4a_f414_4007_ae17_b440b55dea5f.slice/crio-00edaf8b62c16ee50bbf819b1838d3ce3fd0a27605f5823b0347afa99c531c70.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5373bec8_828a_4e9b_b0fd_6a0ef84375de.slice/crio-1b7fb306f61206aff751cf1adbf835164dd03eeceaa44f76421e2b0575c75592.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81ccee4a_f414_4007_ae17_b440b55dea5f.slice/crio-conmon-78edf79de3cfd571e1fec0bd599680cc34a039f29b8fc703497738f0cf348ad8.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1e12e22_8a2c_4093_b9c5_7cc68348e0ee.slice/crio-a88145623badf6b75935e3c66fd27e243c456b8c75fa4980f833fcbd15313f78.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1c39090_1743_40c3_95d5_71f5ca126c96.slice/crio-conmon-319658ac79c4fb4fcd46ed313645d6769272569f11fd5e0e78f4b23b5fcf4935.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81ccee4a_f414_4007_ae17_b440b55dea5f.slice/crio-conmon-00edaf8b62c16ee50bbf819b1838d3ce3fd0a27605f5823b0347afa99c531c70.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1e12e22_8a2c_4093_b9c5_7cc68348e0ee.slice/crio-conmon-a88145623badf6b75935e3c66fd27e243c456b8c75fa4980f833fcbd15313f78.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81ccee4a_f414_4007_ae17_b440b55dea5f.slice/crio-d77ea0e7a1509cc988fded84ce9cd4dc66e884a9b6f07ad09301588d2897762e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod257c86ab_2577_4d46_bdb3_1ec56da0d21e.slice/crio-120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99.scope\": RecentStats: unable to find data in memory cache]" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.404600 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.404632 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c16a4087-2597-4662-880f-80a7a2a78ef2-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.404642 4932 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/c16a4087-2597-4662-880f-80a7a2a78ef2-ovs-rundir\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.404650 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vq8rm\" (UniqueName: \"kubernetes.io/projected/c16a4087-2597-4662-880f-80a7a2a78ef2-kube-api-access-vq8rm\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.404660 4932 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/c16a4087-2597-4662-880f-80a7a2a78ef2-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.427333 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.511679 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.538652 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.539109 4932 generic.go:334] "Generic (PLEG): container finished" podID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" exitCode=0 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.539214 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-drcqj" event={"ID":"257c86ab-2577-4d46-bdb3-1ec56da0d21e","Type":"ContainerDied","Data":"120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.544995 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.545344 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5373bec8-828a-4e9b-b0fd-6a0ef84375de/ovsdbserver-nb/0.log" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.545376 4932 generic.go:334] "Generic (PLEG): container finished" podID="5373bec8-828a-4e9b-b0fd-6a0ef84375de" containerID="1b7fb306f61206aff751cf1adbf835164dd03eeceaa44f76421e2b0575c75592" exitCode=143 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.545418 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5373bec8-828a-4e9b-b0fd-6a0ef84375de","Type":"ContainerDied","Data":"1b7fb306f61206aff751cf1adbf835164dd03eeceaa44f76421e2b0575c75592"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.547363 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.557806 4932 generic.go:334] "Generic (PLEG): container finished" podID="28020cd8-f0a6-4aa9-80e6-4aa92b554850" containerID="96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544" exitCode=137 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.557922 4932 scope.go:117] "RemoveContainer" containerID="96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.558056 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.597943 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" (UID: "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.611380 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron4147-account-delete-qzg4q"] Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.625560 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-dns-swift-storage-0\") pod \"a9855d3c-818d-4804-add2-d6b0fce52613\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.625633 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/28020cd8-f0a6-4aa9-80e6-4aa92b554850-openstack-config-secret\") pod \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.625703 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-ovsdbserver-nb\") pod \"a9855d3c-818d-4804-add2-d6b0fce52613\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.625725 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-ovsdbserver-sb\") pod \"a9855d3c-818d-4804-add2-d6b0fce52613\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.625746 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28020cd8-f0a6-4aa9-80e6-4aa92b554850-combined-ca-bundle\") pod \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.625838 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5x5hp\" (UniqueName: \"kubernetes.io/projected/a9855d3c-818d-4804-add2-d6b0fce52613-kube-api-access-5x5hp\") pod \"a9855d3c-818d-4804-add2-d6b0fce52613\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.625861 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-combined-ca-bundle\") pod \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.625913 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-dns-svc\") pod \"a9855d3c-818d-4804-add2-d6b0fce52613\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.625973 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-config-data\") pod \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.626002 4932 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/28020cd8-f0a6-4aa9-80e6-4aa92b554850-openstack-config\") pod \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.626043 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-config\") pod \"a9855d3c-818d-4804-add2-d6b0fce52613\" (UID: \"a9855d3c-818d-4804-add2-d6b0fce52613\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.626084 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c6b8f\" (UniqueName: \"kubernetes.io/projected/28020cd8-f0a6-4aa9-80e6-4aa92b554850-kube-api-access-c6b8f\") pod \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\" (UID: \"28020cd8-f0a6-4aa9-80e6-4aa92b554850\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.626108 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-nova-novncproxy-tls-certs\") pod \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.626146 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-vencrypt-tls-certs\") pod \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.626171 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjqk7\" (UniqueName: \"kubernetes.io/projected/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-kube-api-access-hjqk7\") pod \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\" (UID: \"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.626746 4932 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.646032 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28020cd8-f0a6-4aa9-80e6-4aa92b554850-kube-api-access-c6b8f" (OuterVolumeSpecName: "kube-api-access-c6b8f") pod "28020cd8-f0a6-4aa9-80e6-4aa92b554850" (UID: "28020cd8-f0a6-4aa9-80e6-4aa92b554850"). InnerVolumeSpecName "kube-api-access-c6b8f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.647962 4932 generic.go:334] "Generic (PLEG): container finished" podID="31823923-9ce9-49e0-b4c1-42418d49918c" containerID="4208013c150414e2c4e6a9db4af0a0ed4445d68f363fa9be56e13050961d4b79" exitCode=143 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.648062 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31823923-9ce9-49e0-b4c1-42418d49918c","Type":"ContainerDied","Data":"4208013c150414e2c4e6a9db4af0a0ed4445d68f363fa9be56e13050961d4b79"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.672521 4932 scope.go:117] "RemoveContainer" containerID="96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.672622 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.675071 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-kube-api-access-hjqk7" (OuterVolumeSpecName: "kube-api-access-hjqk7") pod "15f7fd9d-7a12-4f06-9f9e-d9e4d059039f" (UID: "15f7fd9d-7a12-4f06-9f9e-d9e4d059039f"). InnerVolumeSpecName "kube-api-access-hjqk7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: E1125 09:13:07.678400 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544\": container with ID starting with 96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544 not found: ID does not exist" containerID="96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.678437 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544"} err="failed to get container status \"96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544\": rpc error: code = NotFound desc = could not find container \"96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544\": container with ID starting with 96f164253ef0f11daa0a8ad7022d34f83a8489d82a85114d11f59e0a87a1a544 not found: ID does not exist" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.696768 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-fbk5k_c16a4087-2597-4662-880f-80a7a2a78ef2/openstack-network-exporter/0.log" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.696887 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-fbk5k" event={"ID":"c16a4087-2597-4662-880f-80a7a2a78ef2","Type":"ContainerDied","Data":"29d3d7589e73d563616f3011531058bcebdfc971cdd0988bee5b0aca53c07b26"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.696928 4932 scope.go:117] "RemoveContainer" containerID="2e6c2d101453359a62990d88b2bfc484c902180a068c6131e7ba3d8b29699a33" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.697065 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-fbk5k" Nov 25 09:13:07 crc kubenswrapper[4932]: W1125 09:13:07.732698 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda181c094_1cf9_42bd_b038_cc8a6f437aa3.slice/crio-8862c2da11a079c7703f4807d33328aec442b1e3307f2cd8b23df468be562b63 WatchSource:0}: Error finding container 8862c2da11a079c7703f4807d33328aec442b1e3307f2cd8b23df468be562b63: Status 404 returned error can't find the container with id 8862c2da11a079c7703f4807d33328aec442b1e3307f2cd8b23df468be562b63 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.732826 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-combined-ca-bundle\") pod \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.732937 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-scripts\") pod \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.732964 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-config-data\") pod \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.733096 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-config-data-custom\") pod \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.733144 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gq7g\" (UniqueName: \"kubernetes.io/projected/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-kube-api-access-8gq7g\") pod \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.733267 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-etc-machine-id\") pod \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\" (UID: \"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc\") " Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.733802 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c6b8f\" (UniqueName: \"kubernetes.io/projected/28020cd8-f0a6-4aa9-80e6-4aa92b554850-kube-api-access-c6b8f\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.733820 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjqk7\" (UniqueName: \"kubernetes.io/projected/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-kube-api-access-hjqk7\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.733874 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") 
pod "52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" (UID: "52ff2b1b-8756-4ec2-92b6-54c1d005d1cc"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.738506 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gxjgb" podUID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" containerName="registry-server" probeResult="failure" output=< Nov 25 09:13:07 crc kubenswrapper[4932]: timeout: failed to connect service ":50051" within 1s Nov 25 09:13:07 crc kubenswrapper[4932]: > Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.740548 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9855d3c-818d-4804-add2-d6b0fce52613-kube-api-access-5x5hp" (OuterVolumeSpecName: "kube-api-access-5x5hp") pod "a9855d3c-818d-4804-add2-d6b0fce52613" (UID: "a9855d3c-818d-4804-add2-d6b0fce52613"). InnerVolumeSpecName "kube-api-access-5x5hp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.765347 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-config-data" (OuterVolumeSpecName: "config-data") pod "15f7fd9d-7a12-4f06-9f9e-d9e4d059039f" (UID: "15f7fd9d-7a12-4f06-9f9e-d9e4d059039f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.805523 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" (UID: "52ff2b1b-8756-4ec2-92b6-54c1d005d1cc"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.810985 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="d77ea0e7a1509cc988fded84ce9cd4dc66e884a9b6f07ad09301588d2897762e" exitCode=0 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.811016 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="78edf79de3cfd571e1fec0bd599680cc34a039f29b8fc703497738f0cf348ad8" exitCode=0 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.811023 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="0e9f1ea09136d57750420bc0ce46abbfd67cd0b1239ce71468be11a57e791720" exitCode=0 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.811031 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="00edaf8b62c16ee50bbf819b1838d3ce3fd0a27605f5823b0347afa99c531c70" exitCode=0 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.811076 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"d77ea0e7a1509cc988fded84ce9cd4dc66e884a9b6f07ad09301588d2897762e"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.811100 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"78edf79de3cfd571e1fec0bd599680cc34a039f29b8fc703497738f0cf348ad8"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.811110 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"0e9f1ea09136d57750420bc0ce46abbfd67cd0b1239ce71468be11a57e791720"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.811120 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"00edaf8b62c16ee50bbf819b1838d3ce3fd0a27605f5823b0347afa99c531c70"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.821977 4932 generic.go:334] "Generic (PLEG): container finished" podID="52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" containerID="8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140" exitCode=0 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.822031 4932 generic.go:334] "Generic (PLEG): container finished" podID="52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" containerID="281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34" exitCode=0 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.822080 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc","Type":"ContainerDied","Data":"8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.822108 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc","Type":"ContainerDied","Data":"281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.822175 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.826963 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-kube-api-access-8gq7g" (OuterVolumeSpecName: "kube-api-access-8gq7g") pod "52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" (UID: "52ff2b1b-8756-4ec2-92b6-54c1d005d1cc"). InnerVolumeSpecName "kube-api-access-8gq7g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.827537 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-scripts" (OuterVolumeSpecName: "scripts") pod "52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" (UID: "52ff2b1b-8756-4ec2-92b6-54c1d005d1cc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.829098 4932 generic.go:334] "Generic (PLEG): container finished" podID="d1c39090-1743-40c3-95d5-71f5ca126c96" containerID="319658ac79c4fb4fcd46ed313645d6769272569f11fd5e0e78f4b23b5fcf4935" exitCode=143 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.829163 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6d444df75c-9wqvx" event={"ID":"d1c39090-1743-40c3-95d5-71f5ca126c96","Type":"ContainerDied","Data":"319658ac79c4fb4fcd46ed313645d6769272569f11fd5e0e78f4b23b5fcf4935"} Nov 25 09:13:07 crc kubenswrapper[4932]: E1125 09:13:07.829668 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c2a6c3a1e0b539444b03c2c2b147c48f0e4e50e3895eb1146918d21fcc6cd271" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 09:13:07 crc kubenswrapper[4932]: E1125 09:13:07.832604 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c2a6c3a1e0b539444b03c2c2b147c48f0e4e50e3895eb1146918d21fcc6cd271" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.835625 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gq7g\" (UniqueName: \"kubernetes.io/projected/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-kube-api-access-8gq7g\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.835653 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.835662 4932 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.835670 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.835681 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5x5hp\" (UniqueName: 
\"kubernetes.io/projected/a9855d3c-818d-4804-add2-d6b0fce52613-kube-api-access-5x5hp\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.835690 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: E1125 09:13:07.835791 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c2a6c3a1e0b539444b03c2c2b147c48f0e4e50e3895eb1146918d21fcc6cd271" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 09:13:07 crc kubenswrapper[4932]: E1125 09:13:07.835830 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="186ced68-a489-410c-afa6-d4d623c37fc1" containerName="nova-scheduler-scheduler" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.836830 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c16a4087-2597-4662-880f-80a7a2a78ef2-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "c16a4087-2597-4662-880f-80a7a2a78ef2" (UID: "c16a4087-2597-4662-880f-80a7a2a78ef2"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.837893 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" event={"ID":"a9855d3c-818d-4804-add2-d6b0fce52613","Type":"ContainerDied","Data":"5a9d32ed53090394bbfdda6cf63805ca96774969fb605b5e3f2d6659302d976f"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.837975 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.852500 4932 generic.go:334] "Generic (PLEG): container finished" podID="a83ee8ae-69d7-4ca5-ade1-9d2450880338" containerID="905fc878d7a680a212ca79f470646dd7111019ec6d24cae51d0d6adfba1d2500" exitCode=143 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.852598 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" event={"ID":"a83ee8ae-69d7-4ca5-ade1-9d2450880338","Type":"ContainerDied","Data":"905fc878d7a680a212ca79f470646dd7111019ec6d24cae51d0d6adfba1d2500"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.855958 4932 generic.go:334] "Generic (PLEG): container finished" podID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerID="a88145623badf6b75935e3c66fd27e243c456b8c75fa4980f833fcbd15313f78" exitCode=143 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.856006 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c674848fb-kcq2h" event={"ID":"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee","Type":"ContainerDied","Data":"a88145623badf6b75935e3c66fd27e243c456b8c75fa4980f833fcbd15313f78"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.872815 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce/ovsdbserver-sb/0.log" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.872965 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.873637 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce","Type":"ContainerDied","Data":"8082e61f9afe6b88dfe1a633a0d10565d2bcd3ef76138c30a9abdc556141a656"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.886250 4932 generic.go:334] "Generic (PLEG): container finished" podID="15f7fd9d-7a12-4f06-9f9e-d9e4d059039f" containerID="b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47" exitCode=0 Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.886302 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f","Type":"ContainerDied","Data":"b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47"} Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.886721 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.896862 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder71a7-account-delete-wslds"] Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.918350 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" (UID: "f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.937439 4932 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c16a4087-2597-4662-880f-80a7a2a78ef2-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.937464 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:07 crc kubenswrapper[4932]: I1125 09:13:07.982338 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican8b4d-account-delete-dprdr"] Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.024376 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28020cd8-f0a6-4aa9-80e6-4aa92b554850-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "28020cd8-f0a6-4aa9-80e6-4aa92b554850" (UID: "28020cd8-f0a6-4aa9-80e6-4aa92b554850"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.103046 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28020cd8-f0a6-4aa9-80e6-4aa92b554850-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "28020cd8-f0a6-4aa9-80e6-4aa92b554850" (UID: "28020cd8-f0a6-4aa9-80e6-4aa92b554850"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.127493 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance5148-account-delete-fbhmq"] Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.135746 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/28020cd8-f0a6-4aa9-80e6-4aa92b554850-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: E1125 09:13:08.135821 4932 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 25 09:13:08 crc kubenswrapper[4932]: E1125 09:13:08.135876 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data podName:f41b25a4-f48e-4938-9c23-0d89751af6ae nodeName:}" failed. No retries permitted until 2025-11-25 09:13:12.135858889 +0000 UTC m=+1452.261888452 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data") pod "rabbitmq-cell1-server-0" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae") : configmap "rabbitmq-cell1-config-data" not found Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.164966 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0a937-account-delete-czmhb"] Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.212440 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-7f5484589f-8gmzk"] Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.212737 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-7f5484589f-8gmzk" podUID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" containerName="proxy-httpd" containerID="cri-o://53bcbc203394b3c852ba1c6182bc8eaf5e1970de1e3b7f900c6947cac59286d4" gracePeriod=30 Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.213274 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-7f5484589f-8gmzk" podUID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" containerName="proxy-server" containerID="cri-o://bb6122d938bb9d23fba3db816a5bc8cfafc993ae9ba9ebacdda64a4c57056966" gracePeriod=30 Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.214463 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "15f7fd9d-7a12-4f06-9f9e-d9e4d059039f" (UID: "15f7fd9d-7a12-4f06-9f9e-d9e4d059039f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.229150 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c16a4087-2597-4662-880f-80a7a2a78ef2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c16a4087-2597-4662-880f-80a7a2a78ef2" (UID: "c16a4087-2597-4662-880f-80a7a2a78ef2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.237699 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c16a4087-2597-4662-880f-80a7a2a78ef2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.237734 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.237746 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/28020cd8-f0a6-4aa9-80e6-4aa92b554850-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: E1125 09:13:08.237833 4932 secret.go:188] Couldn't get secret openstack/barbican-config-data: secret "barbican-config-data" not found Nov 25 09:13:08 crc kubenswrapper[4932]: E1125 09:13:08.237887 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data podName:a83ee8ae-69d7-4ca5-ade1-9d2450880338 nodeName:}" failed. 
No retries permitted until 2025-11-25 09:13:12.237869458 +0000 UTC m=+1452.363899021 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data") pod "barbican-keystone-listener-76dfd47846-vpn45" (UID: "a83ee8ae-69d7-4ca5-ade1-9d2450880338") : secret "barbican-config-data" not found Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.258398 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" (UID: "52ff2b1b-8756-4ec2-92b6-54c1d005d1cc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.286807 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi7e0a-account-delete-drmkw"] Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.304285 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "15f7fd9d-7a12-4f06-9f9e-d9e4d059039f" (UID: "15f7fd9d-7a12-4f06-9f9e-d9e4d059039f"). InnerVolumeSpecName "vencrypt-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.342075 4932 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.347296 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: E1125 09:13:08.348671 4932 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 25 09:13:08 crc kubenswrapper[4932]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-25T09:13:06Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 25 09:13:08 crc kubenswrapper[4932]: /etc/init.d/functions: line 589: 484 Alarm clock "$@" Nov 25 09:13:08 crc kubenswrapper[4932]: > execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-c26qd" message=< Nov 25 09:13:08 crc kubenswrapper[4932]: Exiting ovn-controller (1) [FAILED] Nov 25 09:13:08 crc kubenswrapper[4932]: Killing ovn-controller (1) [ OK ] Nov 25 09:13:08 crc kubenswrapper[4932]: 2025-11-25T09:13:06Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 25 09:13:08 crc kubenswrapper[4932]: /etc/init.d/functions: line 589: 484 Alarm clock "$@" Nov 25 09:13:08 crc kubenswrapper[4932]: > Nov 25 09:13:08 crc kubenswrapper[4932]: E1125 09:13:08.348711 4932 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 25 09:13:08 crc kubenswrapper[4932]: command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: 2025-11-25T09:13:06Z|00001|fatal_signal|WARN|terminating with signal 14 (Alarm clock) Nov 25 09:13:08 crc kubenswrapper[4932]: /etc/init.d/functions: line 589: 484 Alarm clock "$@" Nov 25 09:13:08 crc kubenswrapper[4932]: > pod="openstack/ovn-controller-c26qd" 
podUID="b15edfd7-749d-45a4-9801-1eba98d77a5e" containerName="ovn-controller" containerID="cri-o://4bdba0a0070629dc89bd75eb2cd967b02a72c0ae20ab32bae70fe717dc0a8d8d" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.348749 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-c26qd" podUID="b15edfd7-749d-45a4-9801-1eba98d77a5e" containerName="ovn-controller" containerID="cri-o://4bdba0a0070629dc89bd75eb2cd967b02a72c0ae20ab32bae70fe717dc0a8d8d" gracePeriod=27 Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.361734 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-c26qd" podUID="b15edfd7-749d-45a4-9801-1eba98d77a5e" containerName="ovn-controller" probeResult="failure" output="" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.365657 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a9855d3c-818d-4804-add2-d6b0fce52613" (UID: "a9855d3c-818d-4804-add2-d6b0fce52613"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.383833 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a9855d3c-818d-4804-add2-d6b0fce52613" (UID: "a9855d3c-818d-4804-add2-d6b0fce52613"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.437841 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-config" (OuterVolumeSpecName: "config") pod "a9855d3c-818d-4804-add2-d6b0fce52613" (UID: "a9855d3c-818d-4804-add2-d6b0fce52613"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.450078 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.450136 4932 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.450150 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.494844 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a9855d3c-818d-4804-add2-d6b0fce52613" (UID: "a9855d3c-818d-4804-add2-d6b0fce52613"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.509039 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28020cd8-f0a6-4aa9-80e6-4aa92b554850-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "28020cd8-f0a6-4aa9-80e6-4aa92b554850" (UID: "28020cd8-f0a6-4aa9-80e6-4aa92b554850"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.551013 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.551040 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28020cd8-f0a6-4aa9-80e6-4aa92b554850-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: E1125 09:13:08.551093 4932 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 25 09:13:08 crc kubenswrapper[4932]: E1125 09:13:08.551137 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data podName:969d317e-0787-44a8-8e27-554b0e887444 nodeName:}" failed. No retries permitted until 2025-11-25 09:13:12.551122845 +0000 UTC m=+1452.677152408 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data") pod "rabbitmq-server-0" (UID: "969d317e-0787-44a8-8e27-554b0e887444") : configmap "rabbitmq-config-data" not found Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.574397 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a9855d3c-818d-4804-add2-d6b0fce52613" (UID: "a9855d3c-818d-4804-add2-d6b0fce52613"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.579892 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "15f7fd9d-7a12-4f06-9f9e-d9e4d059039f" (UID: "15f7fd9d-7a12-4f06-9f9e-d9e4d059039f"). InnerVolumeSpecName "nova-novncproxy-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: W1125 09:13:08.599709 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod838bc013_33ba_4722_be1d_b88c9016c83a.slice/crio-6ccbe14fa5d6adc329e2bc92ddd3490d42d4b5b61e6ac29bbebcbaafa5b962e2 WatchSource:0}: Error finding container 6ccbe14fa5d6adc329e2bc92ddd3490d42d4b5b61e6ac29bbebcbaafa5b962e2: Status 404 returned error can't find the container with id 6ccbe14fa5d6adc329e2bc92ddd3490d42d4b5b61e6ac29bbebcbaafa5b962e2 Nov 25 09:13:08 crc kubenswrapper[4932]: E1125 09:13:08.618400 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bf51179534c5cb56fcf8c3a15f1fa6f1e73e732d074224a9cc7f262e16a5f982 is running failed: container process not found" containerID="bf51179534c5cb56fcf8c3a15f1fa6f1e73e732d074224a9cc7f262e16a5f982" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.619024 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-config-data" (OuterVolumeSpecName: "config-data") pod "52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" (UID: "52ff2b1b-8756-4ec2-92b6-54c1d005d1cc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: E1125 09:13:08.620651 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bf51179534c5cb56fcf8c3a15f1fa6f1e73e732d074224a9cc7f262e16a5f982 is running failed: container process not found" containerID="bf51179534c5cb56fcf8c3a15f1fa6f1e73e732d074224a9cc7f262e16a5f982" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 25 09:13:08 crc kubenswrapper[4932]: E1125 09:13:08.621538 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bf51179534c5cb56fcf8c3a15f1fa6f1e73e732d074224a9cc7f262e16a5f982 is running failed: container process not found" containerID="bf51179534c5cb56fcf8c3a15f1fa6f1e73e732d074224a9cc7f262e16a5f982" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 25 09:13:08 crc kubenswrapper[4932]: E1125 09:13:08.621583 4932 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of bf51179534c5cb56fcf8c3a15f1fa6f1e73e732d074224a9cc7f262e16a5f982 is running failed: container process not found" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="2023df73-6a92-4838-8d5e-31f533796950" containerName="galera" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.626963 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28020cd8-f0a6-4aa9-80e6-4aa92b554850" path="/var/lib/kubelet/pods/28020cd8-f0a6-4aa9-80e6-4aa92b554850/volumes" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.652341 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a9855d3c-818d-4804-add2-d6b0fce52613-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.652377 4932 reconciler_common.go:293] "Volume detached for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.652393 4932 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.656405 4932 scope.go:117] "RemoveContainer" containerID="8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.780637 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5373bec8-828a-4e9b-b0fd-6a0ef84375de/ovsdbserver-nb/0.log" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.780703 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.881346 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/placement-5cfb6b64bb-8mrcr" podUID="7a1917d6-4455-4cf5-b932-a38584663b02" containerName="placement-api" probeResult="failure" output="Get \"https://10.217.0.161:8778/\": read tcp 10.217.0.2:49210->10.217.0.161:8778: read: connection reset by peer" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.881404 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/placement-5cfb6b64bb-8mrcr" podUID="7a1917d6-4455-4cf5-b932-a38584663b02" containerName="placement-log" probeResult="failure" output="Get \"https://10.217.0.161:8778/\": read tcp 10.217.0.2:49198->10.217.0.161:8778: read: connection reset by peer" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.913513 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance5148-account-delete-fbhmq" event={"ID":"36140bfd-540f-40b6-8521-a8a3d408dc9d","Type":"ContainerStarted","Data":"9da538ff24693cc88be6d797f79f0c525f21d6f780f1b556685c1ae89c18e1f3"} Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.918386 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5373bec8-828a-4e9b-b0fd-6a0ef84375de/ovsdbserver-nb/0.log" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.918458 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5373bec8-828a-4e9b-b0fd-6a0ef84375de","Type":"ContainerDied","Data":"c2bf490555c3e5cccee9eaf846bab33c361726b160f9da6ba6fa94375404b147"} Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.918550 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.924224 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder71a7-account-delete-wslds" event={"ID":"633c3722-e337-4b6a-98fe-451ac451dd06","Type":"ContainerStarted","Data":"893f8b1e22bf0830d6a92bee6871a9b7767d8b5a153f1818e27f1d2c9e623b8f"} Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.937445 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"52ff2b1b-8756-4ec2-92b6-54c1d005d1cc","Type":"ContainerDied","Data":"95363e93c3264c1a7c8cec0f0f8120329a0b0f2f0d924b428548af444ee5a9b7"} Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.949947 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican8b4d-account-delete-dprdr" event={"ID":"dbc1ab9c-f494-4ce9-8758-d5c724e4413a","Type":"ContainerStarted","Data":"20483f875de433df9cd085258b11bc2dcdce538a7d297cd953e26322a9c7d47d"} Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.953500 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron4147-account-delete-qzg4q" event={"ID":"a181c094-1cf9-42bd-b038-cc8a6f437aa3","Type":"ContainerStarted","Data":"8862c2da11a079c7703f4807d33328aec442b1e3307f2cd8b23df468be562b63"} Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.958568 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44krg\" (UniqueName: \"kubernetes.io/projected/5373bec8-828a-4e9b-b0fd-6a0ef84375de-kube-api-access-44krg\") pod \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.958692 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.958724 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-combined-ca-bundle\") pod \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.958763 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-ovsdbserver-nb-tls-certs\") pod \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.958837 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5373bec8-828a-4e9b-b0fd-6a0ef84375de-ovsdb-rundir\") pod \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.958859 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5373bec8-828a-4e9b-b0fd-6a0ef84375de-config\") pod \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.958909 4932 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5373bec8-828a-4e9b-b0fd-6a0ef84375de-scripts\") pod \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.958930 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-metrics-certs-tls-certs\") pod \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\" (UID: \"5373bec8-828a-4e9b-b0fd-6a0ef84375de\") " Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.961651 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5373bec8-828a-4e9b-b0fd-6a0ef84375de-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "5373bec8-828a-4e9b-b0fd-6a0ef84375de" (UID: "5373bec8-828a-4e9b-b0fd-6a0ef84375de"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.961713 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5373bec8-828a-4e9b-b0fd-6a0ef84375de-scripts" (OuterVolumeSpecName: "scripts") pod "5373bec8-828a-4e9b-b0fd-6a0ef84375de" (UID: "5373bec8-828a-4e9b-b0fd-6a0ef84375de"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.961748 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5373bec8-828a-4e9b-b0fd-6a0ef84375de-config" (OuterVolumeSpecName: "config") pod "5373bec8-828a-4e9b-b0fd-6a0ef84375de" (UID: "5373bec8-828a-4e9b-b0fd-6a0ef84375de"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.963719 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "5373bec8-828a-4e9b-b0fd-6a0ef84375de" (UID: "5373bec8-828a-4e9b-b0fd-6a0ef84375de"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.981325 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5373bec8-828a-4e9b-b0fd-6a0ef84375de-kube-api-access-44krg" (OuterVolumeSpecName: "kube-api-access-44krg") pod "5373bec8-828a-4e9b-b0fd-6a0ef84375de" (UID: "5373bec8-828a-4e9b-b0fd-6a0ef84375de"). InnerVolumeSpecName "kube-api-access-44krg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.984574 4932 generic.go:334] "Generic (PLEG): container finished" podID="ce711acf-071a-4387-8c42-e2f3f8c25df9" containerID="83bb76627ac457d2fce7c0ce9e6259515a96f4e8e6bad2fb47530ca487cba1e3" exitCode=0 Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.985472 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementb0bd-account-delete-vxbgw" event={"ID":"ce711acf-071a-4387-8c42-e2f3f8c25df9","Type":"ContainerDied","Data":"83bb76627ac457d2fce7c0ce9e6259515a96f4e8e6bad2fb47530ca487cba1e3"} Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.995733 4932 generic.go:334] "Generic (PLEG): container finished" podID="d1c39090-1743-40c3-95d5-71f5ca126c96" containerID="abe66e4f341b24534642787b92c4263f4ebf66e15aa3b9d673ff051b62fba4b5" exitCode=0 Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.996044 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6d444df75c-9wqvx" event={"ID":"d1c39090-1743-40c3-95d5-71f5ca126c96","Type":"ContainerDied","Data":"abe66e4f341b24534642787b92c4263f4ebf66e15aa3b9d673ff051b62fba4b5"} Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.996072 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6d444df75c-9wqvx" event={"ID":"d1c39090-1743-40c3-95d5-71f5ca126c96","Type":"ContainerDied","Data":"1f07dc6e3934d752e43ef44bc4ca25b8cd98c2b8b4282bb5a0f98fea0d0ddebf"} Nov 25 09:13:08 crc kubenswrapper[4932]: I1125 09:13:08.996084 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1f07dc6e3934d752e43ef44bc4ca25b8cd98c2b8b4282bb5a0f98fea0d0ddebf" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.005951 4932 generic.go:334] "Generic (PLEG): container finished" podID="a83ee8ae-69d7-4ca5-ade1-9d2450880338" containerID="be76288d747fd77398730e153b2bfa8b05e410e8971bd296d8c9d0bb4df3ac3b" exitCode=0 Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.006004 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" event={"ID":"a83ee8ae-69d7-4ca5-ade1-9d2450880338","Type":"ContainerDied","Data":"be76288d747fd77398730e153b2bfa8b05e410e8971bd296d8c9d0bb4df3ac3b"} Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.010400 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi7e0a-account-delete-drmkw" event={"ID":"838bc013-33ba-4722-be1d-b88c9016c83a","Type":"ContainerStarted","Data":"6ccbe14fa5d6adc329e2bc92ddd3490d42d4b5b61e6ac29bbebcbaafa5b962e2"} Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.012701 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5373bec8-828a-4e9b-b0fd-6a0ef84375de" (UID: "5373bec8-828a-4e9b-b0fd-6a0ef84375de"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.015250 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0a937-account-delete-czmhb" event={"ID":"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71","Type":"ContainerStarted","Data":"73ab8f53236afbf1b63ad6d5ee094552e53704f8d58923766e8b1674e1978978"} Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.016783 4932 generic.go:334] "Generic (PLEG): container finished" podID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" containerID="53bcbc203394b3c852ba1c6182bc8eaf5e1970de1e3b7f900c6947cac59286d4" exitCode=0 Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.016825 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f5484589f-8gmzk" event={"ID":"9e365f51-6fe5-47b3-b183-5cf5cae5c65e","Type":"ContainerDied","Data":"53bcbc203394b3c852ba1c6182bc8eaf5e1970de1e3b7f900c6947cac59286d4"} Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.032065 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-c26qd_b15edfd7-749d-45a4-9801-1eba98d77a5e/ovn-controller/0.log" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.032111 4932 generic.go:334] "Generic (PLEG): container finished" podID="b15edfd7-749d-45a4-9801-1eba98d77a5e" containerID="4bdba0a0070629dc89bd75eb2cd967b02a72c0ae20ab32bae70fe717dc0a8d8d" exitCode=143 Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.032182 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c26qd" event={"ID":"b15edfd7-749d-45a4-9801-1eba98d77a5e","Type":"ContainerDied","Data":"4bdba0a0070629dc89bd75eb2cd967b02a72c0ae20ab32bae70fe717dc0a8d8d"} Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.032220 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-c26qd" event={"ID":"b15edfd7-749d-45a4-9801-1eba98d77a5e","Type":"ContainerDied","Data":"e3c0671e83e049c98e6dea9592babef2511da601289ef9e98206d771e6678a14"} Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.032230 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3c0671e83e049c98e6dea9592babef2511da601289ef9e98206d771e6678a14" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.040440 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"15f7fd9d-7a12-4f06-9f9e-d9e4d059039f","Type":"ContainerDied","Data":"ecb948a960cfa3f7ab50a760d6fb2b5cd5651f5b1ba1274dae9c83117830bd7d"} Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.042567 4932 generic.go:334] "Generic (PLEG): container finished" podID="2023df73-6a92-4838-8d5e-31f533796950" containerID="bf51179534c5cb56fcf8c3a15f1fa6f1e73e732d074224a9cc7f262e16a5f982" exitCode=0 Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.042597 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2023df73-6a92-4838-8d5e-31f533796950","Type":"ContainerDied","Data":"bf51179534c5cb56fcf8c3a15f1fa6f1e73e732d074224a9cc7f262e16a5f982"} Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.042642 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"2023df73-6a92-4838-8d5e-31f533796950","Type":"ContainerDied","Data":"2b75d501b48082379bb24cd31682856a5721c062d0cf04aeaffea55f2ea1210e"} Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.042655 4932 pod_container_deletor.go:80] "Container not found in pod's 
containers" containerID="2b75d501b48082379bb24cd31682856a5721c062d0cf04aeaffea55f2ea1210e" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.072213 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5373bec8-828a-4e9b-b0fd-6a0ef84375de-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.072388 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5373bec8-828a-4e9b-b0fd-6a0ef84375de-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.072402 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44krg\" (UniqueName: \"kubernetes.io/projected/5373bec8-828a-4e9b-b0fd-6a0ef84375de-kube-api-access-44krg\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.072430 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.072442 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.072452 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5373bec8-828a-4e9b-b0fd-6a0ef84375de-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.096513 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.107457 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "5373bec8-828a-4e9b-b0fd-6a0ef84375de" (UID: "5373bec8-828a-4e9b-b0fd-6a0ef84375de"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.156428 4932 scope.go:117] "RemoveContainer" containerID="281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.161888 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.178:9292/healthcheck\": read tcp 10.217.0.2:52228->10.217.0.178:9292: read: connection reset by peer" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.161910 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.178:9292/healthcheck\": read tcp 10.217.0.2:52226->10.217.0.178:9292: read: connection reset by peer" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.162060 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "5373bec8-828a-4e9b-b0fd-6a0ef84375de" (UID: "5373bec8-828a-4e9b-b0fd-6a0ef84375de"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.174790 4932 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.174826 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.174837 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5373bec8-828a-4e9b-b0fd-6a0ef84375de-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.181995 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="c7865402-5a21-44f9-9436-d5d1bab67a07" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.184:8776/healthcheck\": read tcp 10.217.0.2:51806->10.217.0.184:8776: read: connection reset by peer" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.428141 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-7f5484589f-8gmzk" podUID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.162:8080/healthcheck\": dial tcp 10.217.0.162:8080: connect: connection refused" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.428171 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-7f5484589f-8gmzk" podUID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.162:8080/healthcheck\": dial tcp 10.217.0.162:8080: connect: connection refused" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.456942 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.544144 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": dial tcp 10.217.0.207:8775: connect: connection refused" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.544485 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": dial tcp 10.217.0.207:8775: connect: connection refused" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.573636 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.597119 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2023df73-6a92-4838-8d5e-31f533796950-config-data-generated\") pod \"2023df73-6a92-4838-8d5e-31f533796950\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.597249 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-config-data-default\") pod \"2023df73-6a92-4838-8d5e-31f533796950\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.597273 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdmb4\" (UniqueName: \"kubernetes.io/projected/2023df73-6a92-4838-8d5e-31f533796950-kube-api-access-jdmb4\") pod \"2023df73-6a92-4838-8d5e-31f533796950\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.597301 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-kolla-config\") pod \"2023df73-6a92-4838-8d5e-31f533796950\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.597363 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2023df73-6a92-4838-8d5e-31f533796950-galera-tls-certs\") pod \"2023df73-6a92-4838-8d5e-31f533796950\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.597441 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"2023df73-6a92-4838-8d5e-31f533796950\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.597459 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2023df73-6a92-4838-8d5e-31f533796950-combined-ca-bundle\") pod \"2023df73-6a92-4838-8d5e-31f533796950\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.597475 4932 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-operator-scripts\") pod \"2023df73-6a92-4838-8d5e-31f533796950\" (UID: \"2023df73-6a92-4838-8d5e-31f533796950\") " Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.598892 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2023df73-6a92-4838-8d5e-31f533796950" (UID: "2023df73-6a92-4838-8d5e-31f533796950"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.600448 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "2023df73-6a92-4838-8d5e-31f533796950" (UID: "2023df73-6a92-4838-8d5e-31f533796950"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.601701 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "2023df73-6a92-4838-8d5e-31f533796950" (UID: "2023df73-6a92-4838-8d5e-31f533796950"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.602464 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2023df73-6a92-4838-8d5e-31f533796950-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "2023df73-6a92-4838-8d5e-31f533796950" (UID: "2023df73-6a92-4838-8d5e-31f533796950"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.616851 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-c26qd_b15edfd7-749d-45a4-9801-1eba98d77a5e/ovn-controller/0.log" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.616908 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c26qd" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.621505 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.623341 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2023df73-6a92-4838-8d5e-31f533796950-kube-api-access-jdmb4" (OuterVolumeSpecName: "kube-api-access-jdmb4") pod "2023df73-6a92-4838-8d5e-31f533796950" (UID: "2023df73-6a92-4838-8d5e-31f533796950"). InnerVolumeSpecName "kube-api-access-jdmb4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.632098 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-fbk5k"] Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.637658 4932 scope.go:117] "RemoveContainer" containerID="8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.642906 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-fbk5k"] Nov 25 09:13:09 crc kubenswrapper[4932]: E1125 09:13:09.642936 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140\": container with ID starting with 8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140 not found: ID does not exist" containerID="8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.642977 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140"} err="failed to get container status \"8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140\": rpc error: code = NotFound desc = could not find container \"8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140\": container with ID starting with 8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140 not found: ID does not exist" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.642998 4932 scope.go:117] "RemoveContainer" containerID="281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34" Nov 25 09:13:09 crc kubenswrapper[4932]: E1125 09:13:09.649299 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34\": container with ID starting with 281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34 not found: ID does not exist" containerID="281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.649334 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34"} err="failed to get container status \"281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34\": rpc error: code = NotFound desc = could not find container \"281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34\": container with ID starting with 281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34 not found: ID does not exist" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.649356 4932 scope.go:117] "RemoveContainer" containerID="b8a9c7b4d8aee9148d3f80ec0b3f039c905ef40f6d4c9c1480a1255ba2197d40" Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.650564 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.662140 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.681402 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.699379 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-combined-ca-bundle\") pod \"d1c39090-1743-40c3-95d5-71f5ca126c96\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.699439 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-config-data-custom\") pod \"d1c39090-1743-40c3-95d5-71f5ca126c96\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.699511 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmllz\" (UniqueName: \"kubernetes.io/projected/d1c39090-1743-40c3-95d5-71f5ca126c96-kube-api-access-bmllz\") pod \"d1c39090-1743-40c3-95d5-71f5ca126c96\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.699567 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1c39090-1743-40c3-95d5-71f5ca126c96-logs\") pod \"d1c39090-1743-40c3-95d5-71f5ca126c96\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.699635 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-config-data\") pod \"d1c39090-1743-40c3-95d5-71f5ca126c96\" (UID: \"d1c39090-1743-40c3-95d5-71f5ca126c96\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.700440 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2023df73-6a92-4838-8d5e-31f533796950-config-data-generated\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.700467 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-config-data-default\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.700482 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdmb4\" (UniqueName: \"kubernetes.io/projected/2023df73-6a92-4838-8d5e-31f533796950-kube-api-access-jdmb4\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.700494 4932 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-kolla-config\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.700506 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2023df73-6a92-4838-8d5e-31f533796950-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.702554 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.705000 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6c674848fb-kcq2h" podUID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.156:9311/healthcheck\": dial tcp 10.217.0.156:9311: connect: connection refused"
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.705379 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-6c674848fb-kcq2h" podUID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.156:9311/healthcheck\": dial tcp 10.217.0.156:9311: connect: connection refused"
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.705699 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1c39090-1743-40c3-95d5-71f5ca126c96-logs" (OuterVolumeSpecName: "logs") pod "d1c39090-1743-40c3-95d5-71f5ca126c96" (UID: "d1c39090-1743-40c3-95d5-71f5ca126c96"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.706329 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "mysql-db") pod "2023df73-6a92-4838-8d5e-31f533796950" (UID: "2023df73-6a92-4838-8d5e-31f533796950"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.729140 4932 scope.go:117] "RemoveContainer" containerID="31f10fa596503c558e74a122048e857cd990eaba6b0da87eb56ec3d77736763e"
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.735673 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.741300 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5cfb6b64bb-8mrcr"
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.747069 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-qbfss"]
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.755350 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-qbfss"]
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.756496 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1c39090-1743-40c3-95d5-71f5ca126c96-kube-api-access-bmllz" (OuterVolumeSpecName: "kube-api-access-bmllz") pod "d1c39090-1743-40c3-95d5-71f5ca126c96" (UID: "d1c39090-1743-40c3-95d5-71f5ca126c96"). InnerVolumeSpecName "kube-api-access-bmllz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.782384 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d1c39090-1743-40c3-95d5-71f5ca126c96" (UID: "d1c39090-1743-40c3-95d5-71f5ca126c96"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.798067 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802023 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-run-ovn\") pod \"b15edfd7-749d-45a4-9801-1eba98d77a5e\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802088 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbqzt\" (UniqueName: \"kubernetes.io/projected/b15edfd7-749d-45a4-9801-1eba98d77a5e-kube-api-access-hbqzt\") pod \"b15edfd7-749d-45a4-9801-1eba98d77a5e\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802114 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data-custom\") pod \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802144 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jzsz\" (UniqueName: \"kubernetes.io/projected/a83ee8ae-69d7-4ca5-ade1-9d2450880338-kube-api-access-2jzsz\") pod \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802180 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b15edfd7-749d-45a4-9801-1eba98d77a5e-ovn-controller-tls-certs\") pod \"b15edfd7-749d-45a4-9801-1eba98d77a5e\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802240 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data\") pod \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802306 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a83ee8ae-69d7-4ca5-ade1-9d2450880338-logs\") pod \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802347 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-log-ovn\") pod \"b15edfd7-749d-45a4-9801-1eba98d77a5e\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802397 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-combined-ca-bundle\") pod \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\" (UID: \"a83ee8ae-69d7-4ca5-ade1-9d2450880338\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802447 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-run\") pod \"b15edfd7-749d-45a4-9801-1eba98d77a5e\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802470 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b15edfd7-749d-45a4-9801-1eba98d77a5e-scripts\") pod \"b15edfd7-749d-45a4-9801-1eba98d77a5e\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802490 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b15edfd7-749d-45a4-9801-1eba98d77a5e-combined-ca-bundle\") pod \"b15edfd7-749d-45a4-9801-1eba98d77a5e\" (UID: \"b15edfd7-749d-45a4-9801-1eba98d77a5e\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802828 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmllz\" (UniqueName: \"kubernetes.io/projected/d1c39090-1743-40c3-95d5-71f5ca126c96-kube-api-access-bmllz\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802844 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1c39090-1743-40c3-95d5-71f5ca126c96-logs\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802861 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.802872 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.816302 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "b15edfd7-749d-45a4-9801-1eba98d77a5e" (UID: "b15edfd7-749d-45a4-9801-1eba98d77a5e"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.816513 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-run" (OuterVolumeSpecName: "var-run") pod "b15edfd7-749d-45a4-9801-1eba98d77a5e" (UID: "b15edfd7-749d-45a4-9801-1eba98d77a5e"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.816608 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a83ee8ae-69d7-4ca5-ade1-9d2450880338-logs" (OuterVolumeSpecName: "logs") pod "a83ee8ae-69d7-4ca5-ade1-9d2450880338" (UID: "a83ee8ae-69d7-4ca5-ade1-9d2450880338"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.817749 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b15edfd7-749d-45a4-9801-1eba98d77a5e-scripts" (OuterVolumeSpecName: "scripts") pod "b15edfd7-749d-45a4-9801-1eba98d77a5e" (UID: "b15edfd7-749d-45a4-9801-1eba98d77a5e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.817798 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.820724 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "b15edfd7-749d-45a4-9801-1eba98d77a5e" (UID: "b15edfd7-749d-45a4-9801-1eba98d77a5e"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.888678 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a83ee8ae-69d7-4ca5-ade1-9d2450880338" (UID: "a83ee8ae-69d7-4ca5-ade1-9d2450880338"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.904756 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7h5z9\" (UniqueName: \"kubernetes.io/projected/7a1917d6-4455-4cf5-b932-a38584663b02-kube-api-access-7h5z9\") pod \"7a1917d6-4455-4cf5-b932-a38584663b02\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.904963 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-config-data\") pod \"7a1917d6-4455-4cf5-b932-a38584663b02\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.905106 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-public-tls-certs\") pod \"7a1917d6-4455-4cf5-b932-a38584663b02\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.905142 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a1917d6-4455-4cf5-b932-a38584663b02-logs\") pod \"7a1917d6-4455-4cf5-b932-a38584663b02\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.905185 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-internal-tls-certs\") pod \"7a1917d6-4455-4cf5-b932-a38584663b02\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.905247 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-scripts\") pod \"7a1917d6-4455-4cf5-b932-a38584663b02\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.905331 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-combined-ca-bundle\") pod \"7a1917d6-4455-4cf5-b932-a38584663b02\" (UID: \"7a1917d6-4455-4cf5-b932-a38584663b02\") "
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.905855 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a83ee8ae-69d7-4ca5-ade1-9d2450880338-logs\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.905874 4932 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-log-ovn\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.905882 4932 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-run\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.905890 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b15edfd7-749d-45a4-9801-1eba98d77a5e-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.905898 4932 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/b15edfd7-749d-45a4-9801-1eba98d77a5e-var-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.905906 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.908516 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a1917d6-4455-4cf5-b932-a38584663b02-logs" (OuterVolumeSpecName: "logs") pod "7a1917d6-4455-4cf5-b932-a38584663b02" (UID: "7a1917d6-4455-4cf5-b932-a38584663b02"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.915021 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b15edfd7-749d-45a4-9801-1eba98d77a5e-kube-api-access-hbqzt" (OuterVolumeSpecName: "kube-api-access-hbqzt") pod "b15edfd7-749d-45a4-9801-1eba98d77a5e" (UID: "b15edfd7-749d-45a4-9801-1eba98d77a5e"). InnerVolumeSpecName "kube-api-access-hbqzt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.930948 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a83ee8ae-69d7-4ca5-ade1-9d2450880338-kube-api-access-2jzsz" (OuterVolumeSpecName: "kube-api-access-2jzsz") pod "a83ee8ae-69d7-4ca5-ade1-9d2450880338" (UID: "a83ee8ae-69d7-4ca5-ade1-9d2450880338"). InnerVolumeSpecName "kube-api-access-2jzsz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.936216 4932 scope.go:117] "RemoveContainer" containerID="3aa7ad743c9d91e2340b3e3408429966ea1670cfd6e520674b81c2217ef12e5e"
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.967016 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-scripts" (OuterVolumeSpecName: "scripts") pod "7a1917d6-4455-4cf5-b932-a38584663b02" (UID: "7a1917d6-4455-4cf5-b932-a38584663b02"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 09:13:09 crc kubenswrapper[4932]: I1125 09:13:09.967215 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a1917d6-4455-4cf5-b932-a38584663b02-kube-api-access-7h5z9" (OuterVolumeSpecName: "kube-api-access-7h5z9") pod "7a1917d6-4455-4cf5-b932-a38584663b02" (UID: "7a1917d6-4455-4cf5-b932-a38584663b02"). InnerVolumeSpecName "kube-api-access-7h5z9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.007305 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7h5z9\" (UniqueName: \"kubernetes.io/projected/7a1917d6-4455-4cf5-b932-a38584663b02-kube-api-access-7h5z9\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.007335 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7a1917d6-4455-4cf5-b932-a38584663b02-logs\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.007344 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.007354 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbqzt\" (UniqueName: \"kubernetes.io/projected/b15edfd7-749d-45a4-9801-1eba98d77a5e-kube-api-access-hbqzt\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.007363 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jzsz\" (UniqueName: \"kubernetes.io/projected/a83ee8ae-69d7-4ca5-ade1-9d2450880338-kube-api-access-2jzsz\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.053364 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder71a7-account-delete-wslds" event={"ID":"633c3722-e337-4b6a-98fe-451ac451dd06","Type":"ContainerStarted","Data":"bdf7364e9fd604703103c20638764530f1a8592227c9d8357f0450708d123579"}
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.057988 4932 generic.go:334] "Generic (PLEG): container finished" podID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerID="bcb0b33d20667e08d805c88572654c89aed61e0f969c78fc5ef9ec57be99532f" exitCode=0
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.058047 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c674848fb-kcq2h" event={"ID":"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee","Type":"ContainerDied","Data":"bcb0b33d20667e08d805c88572654c89aed61e0f969c78fc5ef9ec57be99532f"}
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.059840 4932 generic.go:334] "Generic (PLEG): container finished" podID="c5101ae2-5106-48c7-9116-4c0e5ededb84" containerID="72f71cf73b9865b04d4d3de5c8547c8ca66dceb1900d89f1ff42c5d833013afd" exitCode=0
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.059877 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c5101ae2-5106-48c7-9116-4c0e5ededb84","Type":"ContainerDied","Data":"72f71cf73b9865b04d4d3de5c8547c8ca66dceb1900d89f1ff42c5d833013afd"}
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.062840 4932 generic.go:334] "Generic (PLEG): container finished" podID="7a1917d6-4455-4cf5-b932-a38584663b02" containerID="37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073" exitCode=0
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.062887 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5cfb6b64bb-8mrcr" event={"ID":"7a1917d6-4455-4cf5-b932-a38584663b02","Type":"ContainerDied","Data":"37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073"}
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.062903 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5cfb6b64bb-8mrcr" event={"ID":"7a1917d6-4455-4cf5-b932-a38584663b02","Type":"ContainerDied","Data":"b81c5bb52f33b60fe0a42b50c16ccd2d473cb0b0834a2dd25ffd6852cefa5228"}
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.062965 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5cfb6b64bb-8mrcr"
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.065112 4932 generic.go:334] "Generic (PLEG): container finished" podID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" containerID="b8682f71f2ee6925b54df3f64b25f4f743542faa8879099318a3b2e0226e6888" exitCode=0
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.082831 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61","Type":"ContainerDied","Data":"b8682f71f2ee6925b54df3f64b25f4f743542faa8879099318a3b2e0226e6888"}
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.084031 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder71a7-account-delete-wslds" podStartSLOduration=6.083999633 podStartE2EDuration="6.083999633s" podCreationTimestamp="2025-11-25 09:13:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:13:10.081200852 +0000 UTC m=+1450.207230435" watchObservedRunningTime="2025-11-25 09:13:10.083999633 +0000 UTC m=+1450.210029196"
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.090456 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron4147-account-delete-qzg4q" event={"ID":"a181c094-1cf9-42bd-b038-cc8a6f437aa3","Type":"ContainerStarted","Data":"86482ae8db87016684a457f31b6613af3655be261977713614c37a0b9fd0465f"}
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.100995 4932 generic.go:334] "Generic (PLEG): container finished" podID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" containerID="bb6122d938bb9d23fba3db816a5bc8cfafc993ae9ba9ebacdda64a4c57056966" exitCode=0
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.101058 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f5484589f-8gmzk" event={"ID":"9e365f51-6fe5-47b3-b183-5cf5cae5c65e","Type":"ContainerDied","Data":"bb6122d938bb9d23fba3db816a5bc8cfafc993ae9ba9ebacdda64a4c57056966"}
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.102573 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" event={"ID":"a83ee8ae-69d7-4ca5-ade1-9d2450880338","Type":"ContainerDied","Data":"891b2037453285938f63ad02841b01b04841feabe49d02f26e783bcd24edc323"}
Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.102653 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45"
Need to start a new one" pod="openstack/barbican-keystone-listener-76dfd47846-vpn45" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.105836 4932 generic.go:334] "Generic (PLEG): container finished" podID="c7865402-5a21-44f9-9436-d5d1bab67a07" containerID="80df99d51a793387f4befd153965af902fa51eff5beea4589846bd522aef8f83" exitCode=0 Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.105926 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c7865402-5a21-44f9-9436-d5d1bab67a07","Type":"ContainerDied","Data":"80df99d51a793387f4befd153965af902fa51eff5beea4589846bd522aef8f83"} Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.109294 4932 generic.go:334] "Generic (PLEG): container finished" podID="31823923-9ce9-49e0-b4c1-42418d49918c" containerID="7247d6a20300098ab3cb5a4ccdeaecb8b01f9585ec29af77a8b23a178fb313d8" exitCode=0 Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.109354 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31823923-9ce9-49e0-b4c1-42418d49918c","Type":"ContainerDied","Data":"7247d6a20300098ab3cb5a4ccdeaecb8b01f9585ec29af77a8b23a178fb313d8"} Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.112628 4932 generic.go:334] "Generic (PLEG): container finished" podID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerID="7d27611ad3f8e0e548937326ec5872d5fd17ef030c916731538091ee33f8c092" exitCode=0 Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.112727 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6d444df75c-9wqvx" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.112806 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"90c30cef-5376-4f4a-8d59-9ab6daff902d","Type":"ContainerDied","Data":"7d27611ad3f8e0e548937326ec5872d5fd17ef030c916731538091ee33f8c092"} Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.112975 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-c26qd" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.113114 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.124396 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron4147-account-delete-qzg4q" podStartSLOduration=6.124379144 podStartE2EDuration="6.124379144s" podCreationTimestamp="2025-11-25 09:13:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:13:10.118008809 +0000 UTC m=+1450.244038362" watchObservedRunningTime="2025-11-25 09:13:10.124379144 +0000 UTC m=+1450.250408707" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.473101 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.482762 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.483062 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="ceilometer-central-agent" containerID="cri-o://d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4" gracePeriod=30 Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.483447 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="proxy-httpd" containerID="cri-o://44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d" gracePeriod=30 Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.483687 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="sg-core" containerID="cri-o://5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028" gracePeriod=30 Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.484172 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="ceilometer-notification-agent" containerID="cri-o://b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2" gracePeriod=30 Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.522560 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.556626 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2023df73-6a92-4838-8d5e-31f533796950-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2023df73-6a92-4838-8d5e-31f533796950" (UID: "2023df73-6a92-4838-8d5e-31f533796950"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.557027 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.561709 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="31866cc1-ccc2-4ffc-8de9-4651a1aa41ad" containerName="kube-state-metrics" containerID="cri-o://c99b5f0370ed3831068a8fcc89de815c298aa1e1d8bcee1c409429deb6c5c99a" gracePeriod=30 Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.564267 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 75e6779617423b881b03c62dbe9856298c32198e80962d252b1ab4afc7067b5d is running failed: container process not found" containerID="75e6779617423b881b03c62dbe9856298c32198e80962d252b1ab4afc7067b5d" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.572863 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 75e6779617423b881b03c62dbe9856298c32198e80962d252b1ab4afc7067b5d is running failed: container process not found" containerID="75e6779617423b881b03c62dbe9856298c32198e80962d252b1ab4afc7067b5d" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.575393 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 75e6779617423b881b03c62dbe9856298c32198e80962d252b1ab4afc7067b5d is running failed: container process not found" containerID="75e6779617423b881b03c62dbe9856298c32198e80962d252b1ab4afc7067b5d" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.575457 4932 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 75e6779617423b881b03c62dbe9856298c32198e80962d252b1ab4afc7067b5d is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="b57cfb59-e562-4fb2-bfad-b4cf5382c45a" containerName="nova-cell0-conductor-conductor" Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.591722 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2cf6819b94d62fccf47fe857c4bedbcb3672a422e4bfda3c5103104951af3ed6" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.594070 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.605160 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.605330 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.605370 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2cf6819b94d62fccf47fe857c4bedbcb3672a422e4bfda3c5103104951af3ed6" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.605510 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a83ee8ae-69d7-4ca5-ade1-9d2450880338" (UID: "a83ee8ae-69d7-4ca5-ade1-9d2450880338"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.610622 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.610682 4932 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovsdb-server" Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.620692 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="2cf6819b94d62fccf47fe857c4bedbcb3672a422e4bfda3c5103104951af3ed6" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.621028 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" containerName="ovn-northd" Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.627348 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 25 
09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.628512 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.628539 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2023df73-6a92-4838-8d5e-31f533796950-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.637155 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 25 09:13:10 crc kubenswrapper[4932]: E1125 09:13:10.637240 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovs-vswitchd" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.642857 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b15edfd7-749d-45a4-9801-1eba98d77a5e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b15edfd7-749d-45a4-9801-1eba98d77a5e" (UID: "b15edfd7-749d-45a4-9801-1eba98d77a5e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.698210 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1c39090-1743-40c3-95d5-71f5ca126c96" (UID: "d1c39090-1743-40c3-95d5-71f5ca126c96"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.715737 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15f7fd9d-7a12-4f06-9f9e-d9e4d059039f" path="/var/lib/kubelet/pods/15f7fd9d-7a12-4f06-9f9e-d9e4d059039f/volumes" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.733804 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" path="/var/lib/kubelet/pods/52ff2b1b-8756-4ec2-92b6-54c1d005d1cc/volumes" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.734670 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.734700 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b15edfd7-749d-45a4-9801-1eba98d77a5e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.746440 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5373bec8-828a-4e9b-b0fd-6a0ef84375de" path="/var/lib/kubelet/pods/5373bec8-828a-4e9b-b0fd-6a0ef84375de/volumes" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.764629 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9855d3c-818d-4804-add2-d6b0fce52613" path="/var/lib/kubelet/pods/a9855d3c-818d-4804-add2-d6b0fce52613/volumes" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.767582 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c16a4087-2597-4662-880f-80a7a2a78ef2" path="/var/lib/kubelet/pods/c16a4087-2597-4662-880f-80a7a2a78ef2/volumes" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.768486 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" path="/var/lib/kubelet/pods/f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce/volumes" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.878516 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2023df73-6a92-4838-8d5e-31f533796950-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "2023df73-6a92-4838-8d5e-31f533796950" (UID: "2023df73-6a92-4838-8d5e-31f533796950"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.880975 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-config-data" (OuterVolumeSpecName: "config-data") pod "d1c39090-1743-40c3-95d5-71f5ca126c96" (UID: "d1c39090-1743-40c3-95d5-71f5ca126c96"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.940769 4932 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2023df73-6a92-4838-8d5e-31f533796950-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.940802 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1c39090-1743-40c3-95d5-71f5ca126c96-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.945673 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-config-data" (OuterVolumeSpecName: "config-data") pod "7a1917d6-4455-4cf5-b932-a38584663b02" (UID: "7a1917d6-4455-4cf5-b932-a38584663b02"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.954693 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a1917d6-4455-4cf5-b932-a38584663b02" (UID: "7a1917d6-4455-4cf5-b932-a38584663b02"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.964577 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data" (OuterVolumeSpecName: "config-data") pod "a83ee8ae-69d7-4ca5-ade1-9d2450880338" (UID: "a83ee8ae-69d7-4ca5-ade1-9d2450880338"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.986966 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b15edfd7-749d-45a4-9801-1eba98d77a5e-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "b15edfd7-749d-45a4-9801-1eba98d77a5e" (UID: "b15edfd7-749d-45a4-9801-1eba98d77a5e"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:10 crc kubenswrapper[4932]: I1125 09:13:10.991322 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7a1917d6-4455-4cf5-b932-a38584663b02" (UID: "7a1917d6-4455-4cf5-b932-a38584663b02"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.045919 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a83ee8ae-69d7-4ca5-ade1-9d2450880338-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.045940 4932 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.045949 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.045957 4932 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/b15edfd7-749d-45a4-9801-1eba98d77a5e-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.045966 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.078025 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7a1917d6-4455-4cf5-b932-a38584663b02" (UID: "7a1917d6-4455-4cf5-b932-a38584663b02"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.136023 4932 generic.go:334] "Generic (PLEG): container finished" podID="a181c094-1cf9-42bd-b038-cc8a6f437aa3" containerID="86482ae8db87016684a457f31b6613af3655be261977713614c37a0b9fd0465f" exitCode=0 Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.139852 4932 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/novaapi7e0a-account-delete-drmkw" secret="" err="secret \"galera-openstack-dockercfg-82gjf\" not found" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.150028 4932 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7a1917d6-4455-4cf5-b932-a38584663b02-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.152541 4932 generic.go:334] "Generic (PLEG): container finished" podID="90db5718-c185-4863-888a-6cb41ca5339d" containerID="44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d" exitCode=0 Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.152581 4932 generic.go:334] "Generic (PLEG): container finished" podID="90db5718-c185-4863-888a-6cb41ca5339d" containerID="5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028" exitCode=2 Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.152676 4932 generic.go:334] "Generic (PLEG): container finished" podID="90db5718-c185-4863-888a-6cb41ca5339d" containerID="d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4" exitCode=0 Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.155621 4932 generic.go:334] "Generic (PLEG): container finished" podID="633c3722-e337-4b6a-98fe-451ac451dd06" containerID="bdf7364e9fd604703103c20638764530f1a8592227c9d8357f0450708d123579" exitCode=0 Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.159021 4932 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novacell0a937-account-delete-czmhb" secret="" err="secret \"galera-openstack-dockercfg-82gjf\" not found" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.170213 4932 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/barbican8b4d-account-delete-dprdr" secret="" err="secret \"galera-openstack-dockercfg-82gjf\" not found" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.192463 4932 generic.go:334] "Generic (PLEG): container finished" podID="b57cfb59-e562-4fb2-bfad-b4cf5382c45a" containerID="75e6779617423b881b03c62dbe9856298c32198e80962d252b1ab4afc7067b5d" exitCode=0 Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.201506 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novaapi7e0a-account-delete-drmkw" podStartSLOduration=7.20148995 podStartE2EDuration="7.20148995s" podCreationTimestamp="2025-11-25 09:13:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:13:11.171349206 +0000 UTC m=+1451.297378779" watchObservedRunningTime="2025-11-25 09:13:11.20148995 +0000 UTC m=+1451.327519503" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.204456 4932 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/glance5148-account-delete-fbhmq" secret="" err="secret \"galera-openstack-dockercfg-82gjf\" not found" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.206410 4932 generic.go:334] "Generic (PLEG): container finished" podID="31866cc1-ccc2-4ffc-8de9-4651a1aa41ad" containerID="c99b5f0370ed3831068a8fcc89de815c298aa1e1d8bcee1c409429deb6c5c99a" exitCode=2 Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.249026 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican8b4d-account-delete-dprdr" podStartSLOduration=7.249007408 podStartE2EDuration="7.249007408s" podCreationTimestamp="2025-11-25 09:13:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:13:11.212369805 +0000 UTC m=+1451.338399368" watchObservedRunningTime="2025-11-25 09:13:11.249007408 +0000 UTC m=+1451.375036971" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.249649 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novacell0a937-account-delete-czmhb" podStartSLOduration=7.249643376 podStartE2EDuration="7.249643376s" podCreationTimestamp="2025-11-25 09:13:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:13:11.236224387 +0000 UTC m=+1451.362253960" watchObservedRunningTime="2025-11-25 09:13:11.249643376 +0000 UTC m=+1451.375672939" Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.251853 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.251930 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts podName:2dac2ddd-1d32-406d-bb47-cbcb0bd71b71 nodeName:}" failed. No retries permitted until 2025-11-25 09:13:11.751905702 +0000 UTC m=+1451.877935265 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts") pod "novacell0a937-account-delete-czmhb" (UID: "2dac2ddd-1d32-406d-bb47-cbcb0bd71b71") : configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.252588 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.252648 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts podName:dbc1ab9c-f494-4ce9-8758-d5c724e4413a nodeName:}" failed. No retries permitted until 2025-11-25 09:13:11.752629583 +0000 UTC m=+1451.878659256 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts") pod "barbican8b4d-account-delete-dprdr" (UID: "dbc1ab9c-f494-4ce9-8758-d5c724e4413a") : configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.252940 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.252983 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts podName:838bc013-33ba-4722-be1d-b88c9016c83a nodeName:}" failed. No retries permitted until 2025-11-25 09:13:11.752970803 +0000 UTC m=+1451.879000486 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts") pod "novaapi7e0a-account-delete-drmkw" (UID: "838bc013-33ba-4722-be1d-b88c9016c83a") : configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.257844 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance5148-account-delete-fbhmq" podStartSLOduration=7.257827473 podStartE2EDuration="7.257827473s" podCreationTimestamp="2025-11-25 09:13:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:13:11.256428433 +0000 UTC m=+1451.382457996" watchObservedRunningTime="2025-11-25 09:13:11.257827473 +0000 UTC m=+1451.383857036" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287157 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287219 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementb0bd-account-delete-vxbgw" event={"ID":"ce711acf-071a-4387-8c42-e2f3f8c25df9","Type":"ContainerDied","Data":"bfedcc77c3faf7eba937e63b985603e26c2ec437945d79e3c486c0e00aff0af3"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287244 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bfedcc77c3faf7eba937e63b985603e26c2ec437945d79e3c486c0e00aff0af3" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287254 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61","Type":"ContainerDied","Data":"dd294829b7ac1e7bd9e05c8a705e4dc8b34a49faa43b91ea8147a1f67529b4da"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287264 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd294829b7ac1e7bd9e05c8a705e4dc8b34a49faa43b91ea8147a1f67529b4da" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287273 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-xlpr4"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287284 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-dqk99"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287297 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron4147-account-delete-qzg4q" event={"ID":"a181c094-1cf9-42bd-b038-cc8a6f437aa3","Type":"ContainerDied","Data":"86482ae8db87016684a457f31b6613af3655be261977713614c37a0b9fd0465f"} Nov 
25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287309 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f5484589f-8gmzk" event={"ID":"9e365f51-6fe5-47b3-b183-5cf5cae5c65e","Type":"ContainerDied","Data":"8e665194bc97ba844124369d64739611e89e9cf21dbcf6b1a53684060eef9389"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287332 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e665194bc97ba844124369d64739611e89e9cf21dbcf6b1a53684060eef9389" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287340 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi7e0a-account-delete-drmkw" event={"ID":"838bc013-33ba-4722-be1d-b88c9016c83a","Type":"ContainerStarted","Data":"9b476db280639f6d81d327b4980e19692d4945304938d0452dc6f72cbad0dc3c"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287351 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"90db5718-c185-4863-888a-6cb41ca5339d","Type":"ContainerDied","Data":"44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287368 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-xlpr4"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287391 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"90db5718-c185-4863-888a-6cb41ca5339d","Type":"ContainerDied","Data":"5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287413 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-dqk99"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287431 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"90db5718-c185-4863-888a-6cb41ca5339d","Type":"ContainerDied","Data":"d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287441 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-6fb96c5d7c-tsdlh"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287455 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder71a7-account-delete-wslds" event={"ID":"633c3722-e337-4b6a-98fe-451ac451dd06","Type":"ContainerDied","Data":"bdf7364e9fd604703103c20638764530f1a8592227c9d8357f0450708d123579"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287477 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287498 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6c674848fb-kcq2h" event={"ID":"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee","Type":"ContainerDied","Data":"9bfc0f6aeca199054cc5f74846dd1737267b925812ff8c39383aa7003f0745b2"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287508 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9bfc0f6aeca199054cc5f74846dd1737267b925812ff8c39383aa7003f0745b2" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287520 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-jlfk4"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287531 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-jlfk4"] Nov 25 09:13:11 crc 
kubenswrapper[4932]: I1125 09:13:11.287552 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bac1-account-create-ntphl"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287562 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0a937-account-delete-czmhb" event={"ID":"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71","Type":"ContainerStarted","Data":"9b60a7120977a356bc3ba5ed856d8a02152e5c7b2b97a474909ee1d09c5d2fa5"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287574 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bac1-account-create-ntphl"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287586 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican8b4d-account-delete-dprdr" event={"ID":"dbc1ab9c-f494-4ce9-8758-d5c724e4413a","Type":"ContainerStarted","Data":"c17d2b7a60e488f01f5b61b845be2ae08ecd0dcb78cfdb75a8b72ffb6d34fa27"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287607 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"c7865402-5a21-44f9-9436-d5d1bab67a07","Type":"ContainerDied","Data":"5390171e2f1d5baf3a0c95de61ae540ccbbe324cef3d85a44562125aee7d8634"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287619 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5390171e2f1d5baf3a0c95de61ae540ccbbe324cef3d85a44562125aee7d8634" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287628 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c5101ae2-5106-48c7-9116-4c0e5ededb84","Type":"ContainerDied","Data":"9100a82045755f862259439d50e1bed4ab6f7c4cc3ed16bd1ac86db7a21762e3"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287638 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9100a82045755f862259439d50e1bed4ab6f7c4cc3ed16bd1ac86db7a21762e3" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287646 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"31823923-9ce9-49e0-b4c1-42418d49918c","Type":"ContainerDied","Data":"9364cfc667ba84209a56fce2ce4da8c6edded2af654c8375e60e8d2e2d7cced1"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287655 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9364cfc667ba84209a56fce2ce4da8c6edded2af654c8375e60e8d2e2d7cced1" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287663 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"b57cfb59-e562-4fb2-bfad-b4cf5382c45a","Type":"ContainerDied","Data":"75e6779617423b881b03c62dbe9856298c32198e80962d252b1ab4afc7067b5d"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287682 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"90c30cef-5376-4f4a-8d59-9ab6daff902d","Type":"ContainerDied","Data":"ceb3b42ad1b5a2d2807be40562bba836fb84b40cd4d5af68cf85ad5b467e9bd1"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287693 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ceb3b42ad1b5a2d2807be40562bba836fb84b40cd4d5af68cf85ad5b467e9bd1" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287703 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance5148-account-delete-fbhmq" 
event={"ID":"36140bfd-540f-40b6-8521-a8a3d408dc9d","Type":"ContainerStarted","Data":"b3077aa432072fd3e5326ed9fb2a90716d917a8e76bd378c56a62bc655717477"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.287714 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad","Type":"ContainerDied","Data":"c99b5f0370ed3831068a8fcc89de815c298aa1e1d8bcee1c409429deb6c5c99a"} Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.291268 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-6fb96c5d7c-tsdlh" podUID="8153c48a-65e5-4525-b3ca-4dba83d94681" containerName="keystone-api" containerID="cri-o://cf9486063626577ad9657d77cfb72663e93d27944dae23244b9e36f69d66b24d" gracePeriod=30 Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.294733 4932 scope.go:117] "RemoveContainer" containerID="cd839751f73e93f33c82ece92bdaf68a46775b2428ad48d61c20238e06cf889d" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.297701 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="cc680bc2-b240-40b6-b77e-c0d264f283b3" containerName="memcached" containerID="cri-o://24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e" gracePeriod=30 Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.354794 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.354853 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts podName:36140bfd-540f-40b6-8521-a8a3d408dc9d nodeName:}" failed. No retries permitted until 2025-11-25 09:13:11.854839197 +0000 UTC m=+1451.980868750 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts") pod "glance5148-account-delete-fbhmq" (UID: "36140bfd-540f-40b6-8521-a8a3d408dc9d") : configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.397498 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.416598 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-76dfd47846-vpn45"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.422509 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.426443 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-76dfd47846-vpn45"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.431446 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.436466 4932 scope.go:117] "RemoveContainer" containerID="b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.437585 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-6d444df75c-9wqvx"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.450361 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-6d444df75c-9wqvx"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.452469 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.470907 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.472976 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.475051 4932 scope.go:117] "RemoveContainer" containerID="4ec3744d9a3e32c5d252c31f008b49edb1884c6bb290d5af4a837ca5bbb374f8" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.478845 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.481656 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.488614 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5cfb6b64bb-8mrcr"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.514357 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-5cfb6b64bb-8mrcr"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.520706 4932 scope.go:117] "RemoveContainer" containerID="1b7fb306f61206aff751cf1adbf835164dd03eeceaa44f76421e2b0575c75592" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.523029 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.523271 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-c26qd"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.525938 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementb0bd-account-delete-vxbgw" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.527789 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.528668 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-c26qd"] Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.533315 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.552548 4932 scope.go:117] "RemoveContainer" containerID="8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.553783 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140"} err="failed to get container status \"8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140\": rpc error: code = NotFound desc = could not find container \"8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140\": container with ID starting with 8bb403cc4b7e6438f2b21d2f5351a6291f83c3b7589e02e72295c214374a9140 not found: ID does not exist" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.553811 4932 scope.go:117] "RemoveContainer" containerID="281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562565 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34"} err="failed to get container status \"281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34\": rpc error: code = NotFound desc = could not find container \"281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34\": container with ID starting with 281da720cbf17b7ed1e24b2ffeba1900f4a5f94acab86edc46e69880b466ba34 not found: ID does not exist" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562609 4932 scope.go:117] "RemoveContainer" containerID="b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562724 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-log-httpd\") pod \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562764 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-internal-tls-certs\") pod \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562810 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-config-data\") pod \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562836 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-combined-ca-bundle\") pod \"c7865402-5a21-44f9-9436-d5d1bab67a07\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562852 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-internal-tls-certs\") pod \"c7865402-5a21-44f9-9436-d5d1bab67a07\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") " 
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562866 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-config-data\") pod \"c7865402-5a21-44f9-9436-d5d1bab67a07\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562886 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-public-tls-certs\") pod \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562903 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-internal-tls-certs\") pod \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562919 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zgf4\" (UniqueName: \"kubernetes.io/projected/c7865402-5a21-44f9-9436-d5d1bab67a07-kube-api-access-6zgf4\") pod \"c7865402-5a21-44f9-9436-d5d1bab67a07\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562938 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90c30cef-5376-4f4a-8d59-9ab6daff902d-logs\") pod \"90c30cef-5376-4f4a-8d59-9ab6daff902d\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562968 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.562983 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-config-data\") pod \"90c30cef-5376-4f4a-8d59-9ab6daff902d\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.563006 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dvsc\" (UniqueName: \"kubernetes.io/projected/31823923-9ce9-49e0-b4c1-42418d49918c-kube-api-access-9dvsc\") pod \"31823923-9ce9-49e0-b4c1-42418d49918c\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.563036 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-combined-ca-bundle\") pod \"31823923-9ce9-49e0-b4c1-42418d49918c\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.563082 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-logs\") pod \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.563102 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-nova-metadata-tls-certs\") pod \"90c30cef-5376-4f4a-8d59-9ab6daff902d\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.563478 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7865402-5a21-44f9-9436-d5d1bab67a07-logs\") pod \"c7865402-5a21-44f9-9436-d5d1bab67a07\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.563499 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-combined-ca-bundle\") pod \"90c30cef-5376-4f4a-8d59-9ab6daff902d\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.563522 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-combined-ca-bundle\") pod \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.563565 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9e365f51-6fe5-47b3-b183-5cf5cae5c65e" (UID: "9e365f51-6fe5-47b3-b183-5cf5cae5c65e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.563768 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47\": container with ID starting with b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47 not found: ID does not exist" containerID="b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47"
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.563793 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47"} err="failed to get container status \"b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47\": rpc error: code = NotFound desc = could not find container \"b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47\": container with ID starting with b047f8058780633c355fc8e9683f7b490f8b5e7965f5faca61d4c3c6becced47 not found: ID does not exist"
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.563813 4932 scope.go:117] "RemoveContainer" containerID="37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073"
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564207 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-public-tls-certs\") pod \"31823923-9ce9-49e0-b4c1-42418d49918c\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564235 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-public-tls-certs\") pod \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564305 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-config-data\") pod \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564337 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-config-data\") pod \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564356 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-scripts\") pod \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564376 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-run-httpd\") pod \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564396 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-internal-tls-certs\") pod \"31823923-9ce9-49e0-b4c1-42418d49918c\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564413 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-scripts\") pod \"c7865402-5a21-44f9-9436-d5d1bab67a07\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564432 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-config-data\") pod \"31823923-9ce9-49e0-b4c1-42418d49918c\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564451 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-config-data-custom\") pod \"c7865402-5a21-44f9-9436-d5d1bab67a07\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564635 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrbvd\" (UniqueName: \"kubernetes.io/projected/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-kube-api-access-xrbvd\") pod \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564654 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7865402-5a21-44f9-9436-d5d1bab67a07-etc-machine-id\") pod \"c7865402-5a21-44f9-9436-d5d1bab67a07\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564676 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-478km\" (UniqueName: \"kubernetes.io/projected/90c30cef-5376-4f4a-8d59-9ab6daff902d-kube-api-access-478km\") pod \"90c30cef-5376-4f4a-8d59-9ab6daff902d\" (UID: \"90c30cef-5376-4f4a-8d59-9ab6daff902d\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564698 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-public-tls-certs\") pod \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564717 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qx2rr\" (UniqueName: \"kubernetes.io/projected/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-kube-api-access-qx2rr\") pod \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564739 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-logs\") pod \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564756 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31823923-9ce9-49e0-b4c1-42418d49918c-logs\") pod \"31823923-9ce9-49e0-b4c1-42418d49918c\" (UID: \"31823923-9ce9-49e0-b4c1-42418d49918c\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564782 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-httpd-run\") pod \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564800 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-public-tls-certs\") pod \"c7865402-5a21-44f9-9436-d5d1bab67a07\" (UID: \"c7865402-5a21-44f9-9436-d5d1bab67a07\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564813 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-config-data-custom\") pod \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\" (UID: \"c1e12e22-8a2c-4093-b9c5-7cc68348e0ee\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564839 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-combined-ca-bundle\") pod \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564854 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjrsk\" (UniqueName: \"kubernetes.io/projected/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-kube-api-access-fjrsk\") pod \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564870 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-combined-ca-bundle\") pod \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\" (UID: \"f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.564894 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-etc-swift\") pod \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\" (UID: \"9e365f51-6fe5-47b3-b183-5cf5cae5c65e\") "
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.565413 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.566581 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90c30cef-5376-4f4a-8d59-9ab6daff902d-logs" (OuterVolumeSpecName: "logs") pod "90c30cef-5376-4f4a-8d59-9ab6daff902d" (UID: "90c30cef-5376-4f4a-8d59-9ab6daff902d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.566699 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7865402-5a21-44f9-9436-d5d1bab67a07-logs" (OuterVolumeSpecName: "logs") pod "c7865402-5a21-44f9-9436-d5d1bab67a07" (UID: "c7865402-5a21-44f9-9436-d5d1bab67a07"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.580071 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-logs" (OuterVolumeSpecName: "logs") pod "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" (UID: "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.580749 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31823923-9ce9-49e0-b4c1-42418d49918c-kube-api-access-9dvsc" (OuterVolumeSpecName: "kube-api-access-9dvsc") pod "31823923-9ce9-49e0-b4c1-42418d49918c" (UID: "31823923-9ce9-49e0-b4c1-42418d49918c"). InnerVolumeSpecName "kube-api-access-9dvsc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.580859 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "9e365f51-6fe5-47b3-b183-5cf5cae5c65e" (UID: "9e365f51-6fe5-47b3-b183-5cf5cae5c65e"). InnerVolumeSpecName "etc-swift".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.586382 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9e365f51-6fe5-47b3-b183-5cf5cae5c65e" (UID: "9e365f51-6fe5-47b3-b183-5cf5cae5c65e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.587098 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-logs" (OuterVolumeSpecName: "logs") pod "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" (UID: "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.587457 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31823923-9ce9-49e0-b4c1-42418d49918c-logs" (OuterVolumeSpecName: "logs") pod "31823923-9ce9-49e0-b4c1-42418d49918c" (UID: "31823923-9ce9-49e0-b4c1-42418d49918c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.587705 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" (UID: "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.588112 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c7865402-5a21-44f9-9436-d5d1bab67a07-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c7865402-5a21-44f9-9436-d5d1bab67a07" (UID: "c7865402-5a21-44f9-9436-d5d1bab67a07"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.596676 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" (UID: "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.601623 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7865402-5a21-44f9-9436-d5d1bab67a07-kube-api-access-6zgf4" (OuterVolumeSpecName: "kube-api-access-6zgf4") pod "c7865402-5a21-44f9-9436-d5d1bab67a07" (UID: "c7865402-5a21-44f9-9436-d5d1bab67a07"). InnerVolumeSpecName "kube-api-access-6zgf4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.601703 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" (UID: "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61"). InnerVolumeSpecName "local-storage12-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.601751 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-scripts" (OuterVolumeSpecName: "scripts") pod "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" (UID: "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.604360 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-kube-api-access-qx2rr" (OuterVolumeSpecName: "kube-api-access-qx2rr") pod "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" (UID: "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee"). InnerVolumeSpecName "kube-api-access-qx2rr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.605071 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90c30cef-5376-4f4a-8d59-9ab6daff902d-kube-api-access-478km" (OuterVolumeSpecName: "kube-api-access-478km") pod "90c30cef-5376-4f4a-8d59-9ab6daff902d" (UID: "90c30cef-5376-4f4a-8d59-9ab6daff902d"). InnerVolumeSpecName "kube-api-access-478km". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.605082 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-kube-api-access-xrbvd" (OuterVolumeSpecName: "kube-api-access-xrbvd") pod "9e365f51-6fe5-47b3-b183-5cf5cae5c65e" (UID: "9e365f51-6fe5-47b3-b183-5cf5cae5c65e"). InnerVolumeSpecName "kube-api-access-xrbvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.605273 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c7865402-5a21-44f9-9436-d5d1bab67a07" (UID: "c7865402-5a21-44f9-9436-d5d1bab67a07"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.607166 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-scripts" (OuterVolumeSpecName: "scripts") pod "c7865402-5a21-44f9-9436-d5d1bab67a07" (UID: "c7865402-5a21-44f9-9436-d5d1bab67a07"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.607868 4932 scope.go:117] "RemoveContainer" containerID="a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.612321 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-kube-api-access-fjrsk" (OuterVolumeSpecName: "kube-api-access-fjrsk") pod "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" (UID: "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61"). InnerVolumeSpecName "kube-api-access-fjrsk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.613304 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="8134265d-9da9-4607-8db8-98330608ba4c" containerName="galera" containerID="cri-o://1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541" gracePeriod=30 Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.658648 4932 scope.go:117] "RemoveContainer" containerID="37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073" Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.659988 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073\": container with ID starting with 37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073 not found: ID does not exist" containerID="37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.660026 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073"} err="failed to get container status \"37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073\": rpc error: code = NotFound desc = could not find container \"37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073\": container with ID starting with 37eb50d15d830d70e8b4be163572a289ca3f0846ac1f4d81e7dddab683f30073 not found: ID does not exist" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.660054 4932 scope.go:117] "RemoveContainer" containerID="a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067" Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.660471 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067\": container with ID starting with a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067 not found: ID does not exist" containerID="a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.660488 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067"} err="failed to get container status \"a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067\": rpc error: code = NotFound desc = could not find container \"a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067\": container with ID starting with a22cf2e0c576d7204a2ff048c9701afcba262a10c88a76d5f1693c19d7d32067 not found: ID does not exist" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.660503 4932 scope.go:117] "RemoveContainer" containerID="be76288d747fd77398730e153b2bfa8b05e410e8971bd296d8c9d0bb4df3ac3b" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.665937 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cg6ql\" (UniqueName: \"kubernetes.io/projected/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-kube-api-access-cg6ql\") pod \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\" (UID: \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.665974 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-combined-ca-bundle\") pod \"c5101ae2-5106-48c7-9116-4c0e5ededb84\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666048 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-combined-ca-bundle\") pod \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\" (UID: \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666081 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"c5101ae2-5106-48c7-9116-4c0e5ededb84\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666105 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-state-metrics-tls-config\") pod \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666145 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-config-data\") pod \"c5101ae2-5106-48c7-9116-4c0e5ededb84\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666221 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5101ae2-5106-48c7-9116-4c0e5ededb84-logs\") pod \"c5101ae2-5106-48c7-9116-4c0e5ededb84\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666250 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-internal-tls-certs\") pod \"c5101ae2-5106-48c7-9116-4c0e5ededb84\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666272 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c5101ae2-5106-48c7-9116-4c0e5ededb84-httpd-run\") pod \"c5101ae2-5106-48c7-9116-4c0e5ededb84\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666294 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce711acf-071a-4387-8c42-e2f3f8c25df9-operator-scripts\") pod \"ce711acf-071a-4387-8c42-e2f3f8c25df9\" (UID: \"ce711acf-071a-4387-8c42-e2f3f8c25df9\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666362 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-scripts\") pod \"c5101ae2-5106-48c7-9116-4c0e5ededb84\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666378 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-state-metrics-tls-certs\") pod \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666494 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tw5xn\" (UniqueName: \"kubernetes.io/projected/ce711acf-071a-4387-8c42-e2f3f8c25df9-kube-api-access-tw5xn\") pod \"ce711acf-071a-4387-8c42-e2f3f8c25df9\" (UID: \"ce711acf-071a-4387-8c42-e2f3f8c25df9\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666527 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s57q6\" (UniqueName: \"kubernetes.io/projected/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-api-access-s57q6\") pod \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666545 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-combined-ca-bundle\") pod \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\" (UID: \"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666562 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mh8q9\" (UniqueName: \"kubernetes.io/projected/c5101ae2-5106-48c7-9116-4c0e5ededb84-kube-api-access-mh8q9\") pod \"c5101ae2-5106-48c7-9116-4c0e5ededb84\" (UID: \"c5101ae2-5106-48c7-9116-4c0e5ededb84\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.666581 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-config-data\") pod \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\" (UID: \"b57cfb59-e562-4fb2-bfad-b4cf5382c45a\") " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.680489 4932 scope.go:117] "RemoveContainer" containerID="905fc878d7a680a212ca79f470646dd7111019ec6d24cae51d0d6adfba1d2500" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692409 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjrsk\" (UniqueName: \"kubernetes.io/projected/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-kube-api-access-fjrsk\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692455 4932 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692471 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zgf4\" (UniqueName: \"kubernetes.io/projected/c7865402-5a21-44f9-9436-d5d1bab67a07-kube-api-access-6zgf4\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692485 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90c30cef-5376-4f4a-8d59-9ab6daff902d-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692516 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Nov 25 09:13:11 crc kubenswrapper[4932]: 
I1125 09:13:11.692531 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dvsc\" (UniqueName: \"kubernetes.io/projected/31823923-9ce9-49e0-b4c1-42418d49918c-kube-api-access-9dvsc\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692545 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692559 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7865402-5a21-44f9-9436-d5d1bab67a07-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692574 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692587 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692602 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692617 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692630 4932 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c7865402-5a21-44f9-9436-d5d1bab67a07-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692647 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrbvd\" (UniqueName: \"kubernetes.io/projected/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-kube-api-access-xrbvd\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692661 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-478km\" (UniqueName: \"kubernetes.io/projected/90c30cef-5376-4f4a-8d59-9ab6daff902d-kube-api-access-478km\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692675 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qx2rr\" (UniqueName: \"kubernetes.io/projected/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-kube-api-access-qx2rr\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692689 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692701 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31823923-9ce9-49e0-b4c1-42418d49918c-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692714 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692727 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692545 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5101ae2-5106-48c7-9116-4c0e5ededb84-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c5101ae2-5106-48c7-9116-4c0e5ededb84" (UID: "c5101ae2-5106-48c7-9116-4c0e5ededb84"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.692897 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce711acf-071a-4387-8c42-e2f3f8c25df9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ce711acf-071a-4387-8c42-e2f3f8c25df9" (UID: "ce711acf-071a-4387-8c42-e2f3f8c25df9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.694112 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5101ae2-5106-48c7-9116-4c0e5ededb84-logs" (OuterVolumeSpecName: "logs") pod "c5101ae2-5106-48c7-9116-4c0e5ededb84" (UID: "c5101ae2-5106-48c7-9116-4c0e5ededb84"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.732785 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "c5101ae2-5106-48c7-9116-4c0e5ededb84" (UID: "c5101ae2-5106-48c7-9116-4c0e5ededb84"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.735080 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-api-access-s57q6" (OuterVolumeSpecName: "kube-api-access-s57q6") pod "31866cc1-ccc2-4ffc-8de9-4651a1aa41ad" (UID: "31866cc1-ccc2-4ffc-8de9-4651a1aa41ad"). InnerVolumeSpecName "kube-api-access-s57q6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.735590 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce711acf-071a-4387-8c42-e2f3f8c25df9-kube-api-access-tw5xn" (OuterVolumeSpecName: "kube-api-access-tw5xn") pod "ce711acf-071a-4387-8c42-e2f3f8c25df9" (UID: "ce711acf-071a-4387-8c42-e2f3f8c25df9"). InnerVolumeSpecName "kube-api-access-tw5xn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.736021 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-scripts" (OuterVolumeSpecName: "scripts") pod "c5101ae2-5106-48c7-9116-4c0e5ededb84" (UID: "c5101ae2-5106-48c7-9116-4c0e5ededb84"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.739395 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "31823923-9ce9-49e0-b4c1-42418d49918c" (UID: "31823923-9ce9-49e0-b4c1-42418d49918c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.753318 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-55bfb77665-qbfss" podUID="a9855d3c-818d-4804-add2-d6b0fce52613" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.202:5353: i/o timeout" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.753365 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-kube-api-access-cg6ql" (OuterVolumeSpecName: "kube-api-access-cg6ql") pod "b57cfb59-e562-4fb2-bfad-b4cf5382c45a" (UID: "b57cfb59-e562-4fb2-bfad-b4cf5382c45a"). InnerVolumeSpecName "kube-api-access-cg6ql". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.754399 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5101ae2-5106-48c7-9116-4c0e5ededb84-kube-api-access-mh8q9" (OuterVolumeSpecName: "kube-api-access-mh8q9") pod "c5101ae2-5106-48c7-9116-4c0e5ededb84" (UID: "c5101ae2-5106-48c7-9116-4c0e5ededb84"). InnerVolumeSpecName "kube-api-access-mh8q9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.793891 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tw5xn\" (UniqueName: \"kubernetes.io/projected/ce711acf-071a-4387-8c42-e2f3f8c25df9-kube-api-access-tw5xn\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.793919 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s57q6\" (UniqueName: \"kubernetes.io/projected/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-api-access-s57q6\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.793929 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mh8q9\" (UniqueName: \"kubernetes.io/projected/c5101ae2-5106-48c7-9116-4c0e5ededb84-kube-api-access-mh8q9\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.793938 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cg6ql\" (UniqueName: \"kubernetes.io/projected/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-kube-api-access-cg6ql\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.793955 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.793965 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5101ae2-5106-48c7-9116-4c0e5ededb84-logs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.793973 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/c5101ae2-5106-48c7-9116-4c0e5ededb84-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.793982 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ce711acf-071a-4387-8c42-e2f3f8c25df9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.793990 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.793999 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.794047 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.794097 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts podName:838bc013-33ba-4722-be1d-b88c9016c83a nodeName:}" failed. No retries permitted until 2025-11-25 09:13:12.794081489 +0000 UTC m=+1452.920111052 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts") pod "novaapi7e0a-account-delete-drmkw" (UID: "838bc013-33ba-4722-be1d-b88c9016c83a") : configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.794132 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.794151 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts podName:2dac2ddd-1d32-406d-bb47-cbcb0bd71b71 nodeName:}" failed. No retries permitted until 2025-11-25 09:13:12.794145751 +0000 UTC m=+1452.920175314 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts") pod "novacell0a937-account-delete-czmhb" (UID: "2dac2ddd-1d32-406d-bb47-cbcb0bd71b71") : configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.794798 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.794878 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts podName:dbc1ab9c-f494-4ce9-8758-d5c724e4413a nodeName:}" failed. No retries permitted until 2025-11-25 09:13:12.794859382 +0000 UTC m=+1452.920888945 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts") pod "barbican8b4d-account-delete-dprdr" (UID: "dbc1ab9c-f494-4ce9-8758-d5c724e4413a") : configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.798648 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "90c30cef-5376-4f4a-8d59-9ab6daff902d" (UID: "90c30cef-5376-4f4a-8d59-9ab6daff902d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.807948 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.822113 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-config-data" (OuterVolumeSpecName: "config-data") pod "90c30cef-5376-4f4a-8d59-9ab6daff902d" (UID: "90c30cef-5376-4f4a-8d59-9ab6daff902d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.830615 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.847459 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "31866cc1-ccc2-4ffc-8de9-4651a1aa41ad" (UID: "31866cc1-ccc2-4ffc-8de9-4651a1aa41ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.847469 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-config-data" (OuterVolumeSpecName: "config-data") pod "b57cfb59-e562-4fb2-bfad-b4cf5382c45a" (UID: "b57cfb59-e562-4fb2-bfad-b4cf5382c45a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.862410 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b57cfb59-e562-4fb2-bfad-b4cf5382c45a" (UID: "b57cfb59-e562-4fb2-bfad-b4cf5382c45a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.886994 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" (UID: "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.891317 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c7865402-5a21-44f9-9436-d5d1bab67a07" (UID: "c7865402-5a21-44f9-9436-d5d1bab67a07"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.896095 4932 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.896116 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.896126 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.896136 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.896146 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.896156 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.896164 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.896172 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b57cfb59-e562-4fb2-bfad-b4cf5382c45a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.896180 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.896262 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: E1125 09:13:11.896310 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts podName:36140bfd-540f-40b6-8521-a8a3d408dc9d nodeName:}" failed. No retries permitted until 2025-11-25 09:13:12.896293804 +0000 UTC m=+1453.022323367 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts") pod "glance5148-account-delete-fbhmq" (UID: "36140bfd-540f-40b6-8521-a8a3d408dc9d") : configmap "openstack-scripts" not found Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.933797 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" (UID: "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.935214 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "31866cc1-ccc2-4ffc-8de9-4651a1aa41ad" (UID: "31866cc1-ccc2-4ffc-8de9-4651a1aa41ad"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.942496 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "31823923-9ce9-49e0-b4c1-42418d49918c" (UID: "31823923-9ce9-49e0-b4c1-42418d49918c"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.959718 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "9e365f51-6fe5-47b3-b183-5cf5cae5c65e" (UID: "9e365f51-6fe5-47b3-b183-5cf5cae5c65e"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.960305 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "31823923-9ce9-49e0-b4c1-42418d49918c" (UID: "31823923-9ce9-49e0-b4c1-42418d49918c"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.963135 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5101ae2-5106-48c7-9116-4c0e5ededb84" (UID: "c5101ae2-5106-48c7-9116-4c0e5ededb84"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.970296 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-config-data" (OuterVolumeSpecName: "config-data") pod "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" (UID: "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.975784 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c7865402-5a21-44f9-9436-d5d1bab67a07" (UID: "c7865402-5a21-44f9-9436-d5d1bab67a07"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.998616 4932 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.998644 4932 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.998655 4932 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.998664 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.998673 4932 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.998681 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.998689 4932 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:11 crc kubenswrapper[4932]: I1125 09:13:11.998700 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.003473 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "31866cc1-ccc2-4ffc-8de9-4651a1aa41ad" (UID: "31866cc1-ccc2-4ffc-8de9-4651a1aa41ad"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.005584 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c5101ae2-5106-48c7-9116-4c0e5ededb84" (UID: "c5101ae2-5106-48c7-9116-4c0e5ededb84"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.006117 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-config-data" (OuterVolumeSpecName: "config-data") pod "c5101ae2-5106-48c7-9116-4c0e5ededb84" (UID: "c5101ae2-5106-48c7-9116-4c0e5ededb84"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.010074 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" (UID: "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.034456 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-config-data" (OuterVolumeSpecName: "config-data") pod "31823923-9ce9-49e0-b4c1-42418d49918c" (UID: "31823923-9ce9-49e0-b4c1-42418d49918c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.037259 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "9e365f51-6fe5-47b3-b183-5cf5cae5c65e" (UID: "9e365f51-6fe5-47b3-b183-5cf5cae5c65e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.046396 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-config-data" (OuterVolumeSpecName: "config-data") pod "c7865402-5a21-44f9-9436-d5d1bab67a07" (UID: "c7865402-5a21-44f9-9436-d5d1bab67a07"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.050444 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.051626 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.055615 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.055647 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="d586a3b8-c6b8-4c6e-aa6f-11797966d218" containerName="nova-cell1-conductor-conductor" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.061397 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7865402-5a21-44f9-9436-d5d1bab67a07" (UID: "c7865402-5a21-44f9-9436-d5d1bab67a07"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.065934 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-config-data" (OuterVolumeSpecName: "config-data") pod "9e365f51-6fe5-47b3-b183-5cf5cae5c65e" (UID: "9e365f51-6fe5-47b3-b183-5cf5cae5c65e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.080085 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-config-data" (OuterVolumeSpecName: "config-data") pod "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" (UID: "f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.084015 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" (UID: "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.095476 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "90c30cef-5376-4f4a-8d59-9ab6daff902d" (UID: "90c30cef-5376-4f4a-8d59-9ab6daff902d"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.100475 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.100508 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.100517 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7865402-5a21-44f9-9436-d5d1bab67a07-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.100525 4932 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.100534 4932 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.100542 4932 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5101ae2-5106-48c7-9116-4c0e5ededb84-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.100551 4932 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.100560 4932 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/90c30cef-5376-4f4a-8d59-9ab6daff902d-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.100567 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.100576 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.100585 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31823923-9ce9-49e0-b4c1-42418d49918c-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.100593 4932 
reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.104756 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" (UID: "c1e12e22-8a2c-4093-b9c5-7cc68348e0ee"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.117550 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9e365f51-6fe5-47b3-b183-5cf5cae5c65e" (UID: "9e365f51-6fe5-47b3-b183-5cf5cae5c65e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.204734 4932 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.204767 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e365f51-6fe5-47b3-b183-5cf5cae5c65e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.204837 4932 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.204879 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data podName:f41b25a4-f48e-4938-9c23-0d89751af6ae nodeName:}" failed. No retries permitted until 2025-11-25 09:13:20.204865185 +0000 UTC m=+1460.330894738 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data") pod "rabbitmq-cell1-server-0" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae") : configmap "rabbitmq-cell1-config-data" not found Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.211110 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.309738 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cc680bc2-b240-40b6-b77e-c0d264f283b3-config-data\") pod \"cc680bc2-b240-40b6-b77e-c0d264f283b3\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.309778 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc680bc2-b240-40b6-b77e-c0d264f283b3-combined-ca-bundle\") pod \"cc680bc2-b240-40b6-b77e-c0d264f283b3\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.309853 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc680bc2-b240-40b6-b77e-c0d264f283b3-memcached-tls-certs\") pod \"cc680bc2-b240-40b6-b77e-c0d264f283b3\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.309894 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n5d4k\" (UniqueName: \"kubernetes.io/projected/cc680bc2-b240-40b6-b77e-c0d264f283b3-kube-api-access-n5d4k\") pod \"cc680bc2-b240-40b6-b77e-c0d264f283b3\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.309931 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc680bc2-b240-40b6-b77e-c0d264f283b3-kolla-config\") pod \"cc680bc2-b240-40b6-b77e-c0d264f283b3\" (UID: \"cc680bc2-b240-40b6-b77e-c0d264f283b3\") " Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.310996 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc680bc2-b240-40b6-b77e-c0d264f283b3-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "cc680bc2-b240-40b6-b77e-c0d264f283b3" (UID: "cc680bc2-b240-40b6-b77e-c0d264f283b3"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.324024 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"31866cc1-ccc2-4ffc-8de9-4651a1aa41ad","Type":"ContainerDied","Data":"41c91b80b3392c4827df6a0af10ff1bbc06b12abdc9d85527024976e4de2ee8c"} Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.324073 4932 scope.go:117] "RemoveContainer" containerID="c99b5f0370ed3831068a8fcc89de815c298aa1e1d8bcee1c409429deb6c5c99a" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.324172 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.341032 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc680bc2-b240-40b6-b77e-c0d264f283b3-config-data" (OuterVolumeSpecName: "config-data") pod "cc680bc2-b240-40b6-b77e-c0d264f283b3" (UID: "cc680bc2-b240-40b6-b77e-c0d264f283b3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.373933 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc680bc2-b240-40b6-b77e-c0d264f283b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cc680bc2-b240-40b6-b77e-c0d264f283b3" (UID: "cc680bc2-b240-40b6-b77e-c0d264f283b3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.394438 4932 generic.go:334] "Generic (PLEG): container finished" podID="cc680bc2-b240-40b6-b77e-c0d264f283b3" containerID="24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e" exitCode=0 Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.394501 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"cc680bc2-b240-40b6-b77e-c0d264f283b3","Type":"ContainerDied","Data":"24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e"} Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.394527 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"cc680bc2-b240-40b6-b77e-c0d264f283b3","Type":"ContainerDied","Data":"881b0256ab688de0f995a1e9c73f2413abdd38cd31f47c48990007e37cd74934"} Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.394578 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.406405 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc680bc2-b240-40b6-b77e-c0d264f283b3-kube-api-access-n5d4k" (OuterVolumeSpecName: "kube-api-access-n5d4k") pod "cc680bc2-b240-40b6-b77e-c0d264f283b3" (UID: "cc680bc2-b240-40b6-b77e-c0d264f283b3"). InnerVolumeSpecName "kube-api-access-n5d4k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.413205 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n5d4k\" (UniqueName: \"kubernetes.io/projected/cc680bc2-b240-40b6-b77e-c0d264f283b3-kube-api-access-n5d4k\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.413237 4932 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cc680bc2-b240-40b6-b77e-c0d264f283b3-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.413246 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cc680bc2-b240-40b6-b77e-c0d264f283b3-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.413255 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc680bc2-b240-40b6-b77e-c0d264f283b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.419979 4932 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novacell0a937-account-delete-czmhb" secret="" err="secret \"galera-openstack-dockercfg-82gjf\" not found" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.420746 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.421914 4932 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novaapi7e0a-account-delete-drmkw" secret="" err="secret \"galera-openstack-dockercfg-82gjf\" not found" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.423623 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.423701 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.423747 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.424178 4932 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/glance5148-account-delete-fbhmq" secret="" err="secret \"galera-openstack-dockercfg-82gjf\" not found" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.424417 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"b57cfb59-e562-4fb2-bfad-b4cf5382c45a","Type":"ContainerDied","Data":"bbfd84cd76872f9375cfcf4c9cf8aecdca57dd6170845385f12124b333c03b35"} Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.424471 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.428416 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.428911 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7f5484589f-8gmzk" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.458087 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.458142 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.458301 4932 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/barbican8b4d-account-delete-dprdr" secret="" err="secret \"galera-openstack-dockercfg-82gjf\" not found" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.458601 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6c674848fb-kcq2h" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.470251 4932 scope.go:117] "RemoveContainer" containerID="24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.471389 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementb0bd-account-delete-vxbgw" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.557931 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc680bc2-b240-40b6-b77e-c0d264f283b3-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "cc680bc2-b240-40b6-b77e-c0d264f283b3" (UID: "cc680bc2-b240-40b6-b77e-c0d264f283b3"). 
InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.603203 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.633792 4932 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.633875 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data podName:969d317e-0787-44a8-8e27-554b0e887444 nodeName:}" failed. No retries permitted until 2025-11-25 09:13:20.63385719 +0000 UTC m=+1460.759886753 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data") pod "rabbitmq-server-0" (UID: "969d317e-0787-44a8-8e27-554b0e887444") : configmap "rabbitmq-config-data" not found Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.634822 4932 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc680bc2-b240-40b6-b77e-c0d264f283b3-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.721938 4932 scope.go:117] "RemoveContainer" containerID="24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e" Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.723711 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e\": container with ID starting with 24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e not found: ID does not exist" containerID="24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.723774 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e"} err="failed to get container status \"24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e\": rpc error: code = NotFound desc = could not find container \"24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e\": container with ID starting with 24139402ea40ebc14651ed8abb1e19b5eb521d5cde48c204c4f1c8a1f505e13e not found: ID does not exist" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.723811 4932 scope.go:117] "RemoveContainer" containerID="75e6779617423b881b03c62dbe9856298c32198e80962d252b1ab4afc7067b5d" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.729880 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2023df73-6a92-4838-8d5e-31f533796950" path="/var/lib/kubelet/pods/2023df73-6a92-4838-8d5e-31f533796950/volumes" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.730641 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31866cc1-ccc2-4ffc-8de9-4651a1aa41ad" path="/var/lib/kubelet/pods/31866cc1-ccc2-4ffc-8de9-4651a1aa41ad/volumes" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.731799 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7537a42d-2ff4-48ed-b6c4-3efc948e72ab" path="/var/lib/kubelet/pods/7537a42d-2ff4-48ed-b6c4-3efc948e72ab/volumes" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 
09:13:12.732525 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a1917d6-4455-4cf5-b932-a38584663b02" path="/var/lib/kubelet/pods/7a1917d6-4455-4cf5-b932-a38584663b02/volumes" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.733320 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c8b9eab-5875-4ce0-a580-e82023c14801" path="/var/lib/kubelet/pods/9c8b9eab-5875-4ce0-a580-e82023c14801/volumes" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.734926 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a30fe075-e21e-4406-9067-0ca8f5b8d2f3" path="/var/lib/kubelet/pods/a30fe075-e21e-4406-9067-0ca8f5b8d2f3/volumes" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.735626 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a83ee8ae-69d7-4ca5-ade1-9d2450880338" path="/var/lib/kubelet/pods/a83ee8ae-69d7-4ca5-ade1-9d2450880338/volumes" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.736515 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b15edfd7-749d-45a4-9801-1eba98d77a5e" path="/var/lib/kubelet/pods/b15edfd7-749d-45a4-9801-1eba98d77a5e/volumes" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.738258 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d068cda9-60f0-4802-ae8c-bbb4bb9ac33e" path="/var/lib/kubelet/pods/d068cda9-60f0-4802-ae8c-bbb4bb9ac33e/volumes" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.738926 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1c39090-1743-40c3-95d5-71f5ca126c96" path="/var/lib/kubelet/pods/d1c39090-1743-40c3-95d5-71f5ca126c96/volumes" Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.740886 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.740920 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.754677 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.796653 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-7f5484589f-8gmzk"] Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.843307 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.843392 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts podName:dbc1ab9c-f494-4ce9-8758-d5c724e4413a nodeName:}" failed. No retries permitted until 2025-11-25 09:13:14.843345956 +0000 UTC m=+1454.969375519 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts") pod "barbican8b4d-account-delete-dprdr" (UID: "dbc1ab9c-f494-4ce9-8758-d5c724e4413a") : configmap "openstack-scripts" not found Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.843613 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.843636 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts podName:2dac2ddd-1d32-406d-bb47-cbcb0bd71b71 nodeName:}" failed. No retries permitted until 2025-11-25 09:13:14.843629145 +0000 UTC m=+1454.969658708 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts") pod "novacell0a937-account-delete-czmhb" (UID: "2dac2ddd-1d32-406d-bb47-cbcb0bd71b71") : configmap "openstack-scripts" not found Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.843659 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.843677 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts podName:838bc013-33ba-4722-be1d-b88c9016c83a nodeName:}" failed. No retries permitted until 2025-11-25 09:13:14.843672216 +0000 UTC m=+1454.969701769 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts") pod "novaapi7e0a-account-delete-drmkw" (UID: "838bc013-33ba-4722-be1d-b88c9016c83a") : configmap "openstack-scripts" not found Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.843740 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c2a6c3a1e0b539444b03c2c2b147c48f0e4e50e3895eb1146918d21fcc6cd271" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.849907 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c2a6c3a1e0b539444b03c2c2b147c48f0e4e50e3895eb1146918d21fcc6cd271" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.856763 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c2a6c3a1e0b539444b03c2c2b147c48f0e4e50e3895eb1146918d21fcc6cd271" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.856807 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="186ced68-a489-410c-afa6-d4d623c37fc1" containerName="nova-scheduler-scheduler" Nov 25 09:13:12 crc 
kubenswrapper[4932]: I1125 09:13:12.859219 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-7f5484589f-8gmzk"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.870378 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-6c674848fb-kcq2h"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.884407 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-6c674848fb-kcq2h"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.894103 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.900839 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.906650 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.912649 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.918092 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.933363 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.933440 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.945741 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.948206 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:12 crc kubenswrapper[4932]: E1125 09:13:12.948261 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts podName:36140bfd-540f-40b6-8521-a8a3d408dc9d nodeName:}" failed. No retries permitted until 2025-11-25 09:13:14.948246079 +0000 UTC m=+1455.074275642 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts") pod "glance5148-account-delete-fbhmq" (UID: "36140bfd-540f-40b6-8521-a8a3d408dc9d") : configmap "openstack-scripts" not found Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.958173 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:13:12 crc kubenswrapper[4932]: I1125 09:13:12.969139 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.090709 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.110570 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder71a7-account-delete-wslds" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.116071 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron4147-account-delete-qzg4q" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.255143 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8tr6\" (UniqueName: \"kubernetes.io/projected/633c3722-e337-4b6a-98fe-451ac451dd06-kube-api-access-g8tr6\") pod \"633c3722-e337-4b6a-98fe-451ac451dd06\" (UID: \"633c3722-e337-4b6a-98fe-451ac451dd06\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.255518 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a181c094-1cf9-42bd-b038-cc8a6f437aa3-operator-scripts\") pod \"a181c094-1cf9-42bd-b038-cc8a6f437aa3\" (UID: \"a181c094-1cf9-42bd-b038-cc8a6f437aa3\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.255576 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-confd\") pod \"f41b25a4-f48e-4938-9c23-0d89751af6ae\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.255613 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f41b25a4-f48e-4938-9c23-0d89751af6ae-pod-info\") pod \"f41b25a4-f48e-4938-9c23-0d89751af6ae\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.255671 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-plugins-conf\") pod \"f41b25a4-f48e-4938-9c23-0d89751af6ae\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.255704 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zljvp\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-kube-api-access-zljvp\") pod \"f41b25a4-f48e-4938-9c23-0d89751af6ae\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.255758 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-server-conf\") pod \"f41b25a4-f48e-4938-9c23-0d89751af6ae\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.255783 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-erlang-cookie\") pod \"f41b25a4-f48e-4938-9c23-0d89751af6ae\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.256053 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a181c094-1cf9-42bd-b038-cc8a6f437aa3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a181c094-1cf9-42bd-b038-cc8a6f437aa3" (UID: "a181c094-1cf9-42bd-b038-cc8a6f437aa3"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.256215 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zzphm\" (UniqueName: \"kubernetes.io/projected/a181c094-1cf9-42bd-b038-cc8a6f437aa3-kube-api-access-zzphm\") pod \"a181c094-1cf9-42bd-b038-cc8a6f437aa3\" (UID: \"a181c094-1cf9-42bd-b038-cc8a6f437aa3\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.256820 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-tls\") pod \"f41b25a4-f48e-4938-9c23-0d89751af6ae\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.256885 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-plugins\") pod \"f41b25a4-f48e-4938-9c23-0d89751af6ae\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.256925 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f41b25a4-f48e-4938-9c23-0d89751af6ae-erlang-cookie-secret\") pod \"f41b25a4-f48e-4938-9c23-0d89751af6ae\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.256953 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data\") pod \"f41b25a4-f48e-4938-9c23-0d89751af6ae\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.256987 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/633c3722-e337-4b6a-98fe-451ac451dd06-operator-scripts\") pod \"633c3722-e337-4b6a-98fe-451ac451dd06\" (UID: \"633c3722-e337-4b6a-98fe-451ac451dd06\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.257003 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"f41b25a4-f48e-4938-9c23-0d89751af6ae\" (UID: \"f41b25a4-f48e-4938-9c23-0d89751af6ae\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.256366 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "f41b25a4-f48e-4938-9c23-0d89751af6ae" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.256657 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "f41b25a4-f48e-4938-9c23-0d89751af6ae" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.257627 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "f41b25a4-f48e-4938-9c23-0d89751af6ae" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.257964 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/633c3722-e337-4b6a-98fe-451ac451dd06-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "633c3722-e337-4b6a-98fe-451ac451dd06" (UID: "633c3722-e337-4b6a-98fe-451ac451dd06"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.258849 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.258873 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.258884 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/633c3722-e337-4b6a-98fe-451ac451dd06-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.258894 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a181c094-1cf9-42bd-b038-cc8a6f437aa3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.258904 4932 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.261615 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a181c094-1cf9-42bd-b038-cc8a6f437aa3-kube-api-access-zzphm" (OuterVolumeSpecName: "kube-api-access-zzphm") pod "a181c094-1cf9-42bd-b038-cc8a6f437aa3" (UID: "a181c094-1cf9-42bd-b038-cc8a6f437aa3"). InnerVolumeSpecName "kube-api-access-zzphm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.261809 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-kube-api-access-zljvp" (OuterVolumeSpecName: "kube-api-access-zljvp") pod "f41b25a4-f48e-4938-9c23-0d89751af6ae" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae"). InnerVolumeSpecName "kube-api-access-zljvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.261928 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/633c3722-e337-4b6a-98fe-451ac451dd06-kube-api-access-g8tr6" (OuterVolumeSpecName: "kube-api-access-g8tr6") pod "633c3722-e337-4b6a-98fe-451ac451dd06" (UID: "633c3722-e337-4b6a-98fe-451ac451dd06"). InnerVolumeSpecName "kube-api-access-g8tr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.262974 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/f41b25a4-f48e-4938-9c23-0d89751af6ae-pod-info" (OuterVolumeSpecName: "pod-info") pod "f41b25a4-f48e-4938-9c23-0d89751af6ae" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.266784 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "f41b25a4-f48e-4938-9c23-0d89751af6ae" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.267500 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f41b25a4-f48e-4938-9c23-0d89751af6ae-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "f41b25a4-f48e-4938-9c23-0d89751af6ae" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.269367 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "f41b25a4-f48e-4938-9c23-0d89751af6ae" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.313821 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data" (OuterVolumeSpecName: "config-data") pod "f41b25a4-f48e-4938-9c23-0d89751af6ae" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.333820 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-server-conf" (OuterVolumeSpecName: "server-conf") pod "f41b25a4-f48e-4938-9c23-0d89751af6ae" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.353474 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "f41b25a4-f48e-4938-9c23-0d89751af6ae" (UID: "f41b25a4-f48e-4938-9c23-0d89751af6ae"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.356085 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.359809 4932 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f41b25a4-f48e-4938-9c23-0d89751af6ae-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.359827 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.359846 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.359855 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8tr6\" (UniqueName: \"kubernetes.io/projected/633c3722-e337-4b6a-98fe-451ac451dd06-kube-api-access-g8tr6\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.359866 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.359873 4932 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f41b25a4-f48e-4938-9c23-0d89751af6ae-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.359882 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zljvp\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-kube-api-access-zljvp\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.359889 4932 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f41b25a4-f48e-4938-9c23-0d89751af6ae-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.359898 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zzphm\" (UniqueName: \"kubernetes.io/projected/a181c094-1cf9-42bd-b038-cc8a6f437aa3-kube-api-access-zzphm\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.359906 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f41b25a4-f48e-4938-9c23-0d89751af6ae-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.384283 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.460984 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/969d317e-0787-44a8-8e27-554b0e887444-pod-info\") pod \"969d317e-0787-44a8-8e27-554b0e887444\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 
09:13:13.461088 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-erlang-cookie\") pod \"969d317e-0787-44a8-8e27-554b0e887444\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.461160 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"969d317e-0787-44a8-8e27-554b0e887444\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.461201 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/969d317e-0787-44a8-8e27-554b0e887444-erlang-cookie-secret\") pod \"969d317e-0787-44a8-8e27-554b0e887444\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.461224 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-plugins\") pod \"969d317e-0787-44a8-8e27-554b0e887444\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.461253 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-server-conf\") pod \"969d317e-0787-44a8-8e27-554b0e887444\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.461279 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data\") pod \"969d317e-0787-44a8-8e27-554b0e887444\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.461355 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpgft\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-kube-api-access-xpgft\") pod \"969d317e-0787-44a8-8e27-554b0e887444\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.461445 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-confd\") pod \"969d317e-0787-44a8-8e27-554b0e887444\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.461499 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-plugins-conf\") pod \"969d317e-0787-44a8-8e27-554b0e887444\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.461562 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-tls\") pod \"969d317e-0787-44a8-8e27-554b0e887444\" (UID: \"969d317e-0787-44a8-8e27-554b0e887444\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.461611 4932 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "969d317e-0787-44a8-8e27-554b0e887444" (UID: "969d317e-0787-44a8-8e27-554b0e887444"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.461870 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "969d317e-0787-44a8-8e27-554b0e887444" (UID: "969d317e-0787-44a8-8e27-554b0e887444"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.462644 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.462666 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.462678 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.463146 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "969d317e-0787-44a8-8e27-554b0e887444" (UID: "969d317e-0787-44a8-8e27-554b0e887444"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.474768 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "969d317e-0787-44a8-8e27-554b0e887444" (UID: "969d317e-0787-44a8-8e27-554b0e887444"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.482003 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-kube-api-access-xpgft" (OuterVolumeSpecName: "kube-api-access-xpgft") pod "969d317e-0787-44a8-8e27-554b0e887444" (UID: "969d317e-0787-44a8-8e27-554b0e887444"). InnerVolumeSpecName "kube-api-access-xpgft". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.487156 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "969d317e-0787-44a8-8e27-554b0e887444" (UID: "969d317e-0787-44a8-8e27-554b0e887444"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.492240 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron4147-account-delete-qzg4q" event={"ID":"a181c094-1cf9-42bd-b038-cc8a6f437aa3","Type":"ContainerDied","Data":"8862c2da11a079c7703f4807d33328aec442b1e3307f2cd8b23df468be562b63"} Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.492320 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8862c2da11a079c7703f4807d33328aec442b1e3307f2cd8b23df468be562b63" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.492386 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron4147-account-delete-qzg4q" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.504240 4932 generic.go:334] "Generic (PLEG): container finished" podID="f41b25a4-f48e-4938-9c23-0d89751af6ae" containerID="f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2" exitCode=0 Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.504305 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f41b25a4-f48e-4938-9c23-0d89751af6ae","Type":"ContainerDied","Data":"f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2"} Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.504333 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f41b25a4-f48e-4938-9c23-0d89751af6ae","Type":"ContainerDied","Data":"ebad3f3a2d349b5e6bc3b27737e7776609760d823c543b9b8d9f2ca7bba7d372"} Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.504349 4932 scope.go:117] "RemoveContainer" containerID="f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.504441 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.506304 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/969d317e-0787-44a8-8e27-554b0e887444-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "969d317e-0787-44a8-8e27-554b0e887444" (UID: "969d317e-0787-44a8-8e27-554b0e887444"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.508898 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/969d317e-0787-44a8-8e27-554b0e887444-pod-info" (OuterVolumeSpecName: "pod-info") pod "969d317e-0787-44a8-8e27-554b0e887444" (UID: "969d317e-0787-44a8-8e27-554b0e887444"). InnerVolumeSpecName "pod-info". 
PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.510472 4932 generic.go:334] "Generic (PLEG): container finished" podID="969d317e-0787-44a8-8e27-554b0e887444" containerID="86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a" exitCode=0 Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.510533 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"969d317e-0787-44a8-8e27-554b0e887444","Type":"ContainerDied","Data":"86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a"} Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.510560 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"969d317e-0787-44a8-8e27-554b0e887444","Type":"ContainerDied","Data":"bd3ec1f25bd3e898954df337b54dcdb3d5f65e6189da8a9adbc37a2150d81e01"} Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.510624 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.513512 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder71a7-account-delete-wslds" event={"ID":"633c3722-e337-4b6a-98fe-451ac451dd06","Type":"ContainerDied","Data":"893f8b1e22bf0830d6a92bee6871a9b7767d8b5a153f1818e27f1d2c9e623b8f"} Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.513543 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="893f8b1e22bf0830d6a92bee6871a9b7767d8b5a153f1818e27f1d2c9e623b8f" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.513645 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder71a7-account-delete-wslds" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.514441 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data" (OuterVolumeSpecName: "config-data") pod "969d317e-0787-44a8-8e27-554b0e887444" (UID: "969d317e-0787-44a8-8e27-554b0e887444"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.525710 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-server-conf" (OuterVolumeSpecName: "server-conf") pod "969d317e-0787-44a8-8e27-554b0e887444" (UID: "969d317e-0787-44a8-8e27-554b0e887444"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.527433 4932 scope.go:117] "RemoveContainer" containerID="cea9b269376b0126100e6463c531d8bcf8908546bb4a1fa00b8f14257389b126" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.535767 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c9d818a0-17fd-44a2-8855-a6f847efe274/ovn-northd/0.log" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.537176 4932 generic.go:334] "Generic (PLEG): container finished" podID="c9d818a0-17fd-44a2-8855-a6f847efe274" containerID="2cf6819b94d62fccf47fe857c4bedbcb3672a422e4bfda3c5103104951af3ed6" exitCode=139 Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.537243 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c9d818a0-17fd-44a2-8855-a6f847efe274","Type":"ContainerDied","Data":"2cf6819b94d62fccf47fe857c4bedbcb3672a422e4bfda3c5103104951af3ed6"} Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.557420 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.564403 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.565353 4932 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.565384 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.565392 4932 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/969d317e-0787-44a8-8e27-554b0e887444-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.565416 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.565425 4932 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/969d317e-0787-44a8-8e27-554b0e887444-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.565434 4932 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.565442 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/969d317e-0787-44a8-8e27-554b0e887444-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.565453 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpgft\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-kube-api-access-xpgft\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.577986 4932 scope.go:117] "RemoveContainer" 
containerID="f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2" Nov 25 09:13:13 crc kubenswrapper[4932]: E1125 09:13:13.582378 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2\": container with ID starting with f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2 not found: ID does not exist" containerID="f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.582454 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2"} err="failed to get container status \"f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2\": rpc error: code = NotFound desc = could not find container \"f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2\": container with ID starting with f47490dbc20aa646ca9122b434530976b6bc5a4daad425f614f71beccf8236a2 not found: ID does not exist" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.582504 4932 scope.go:117] "RemoveContainer" containerID="cea9b269376b0126100e6463c531d8bcf8908546bb4a1fa00b8f14257389b126" Nov 25 09:13:13 crc kubenswrapper[4932]: E1125 09:13:13.582947 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cea9b269376b0126100e6463c531d8bcf8908546bb4a1fa00b8f14257389b126\": container with ID starting with cea9b269376b0126100e6463c531d8bcf8908546bb4a1fa00b8f14257389b126 not found: ID does not exist" containerID="cea9b269376b0126100e6463c531d8bcf8908546bb4a1fa00b8f14257389b126" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.587368 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea9b269376b0126100e6463c531d8bcf8908546bb4a1fa00b8f14257389b126"} err="failed to get container status \"cea9b269376b0126100e6463c531d8bcf8908546bb4a1fa00b8f14257389b126\": rpc error: code = NotFound desc = could not find container \"cea9b269376b0126100e6463c531d8bcf8908546bb4a1fa00b8f14257389b126\": container with ID starting with cea9b269376b0126100e6463c531d8bcf8908546bb4a1fa00b8f14257389b126 not found: ID does not exist" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.587477 4932 scope.go:117] "RemoveContainer" containerID="86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.612734 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "969d317e-0787-44a8-8e27-554b0e887444" (UID: "969d317e-0787-44a8-8e27-554b0e887444"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.614521 4932 scope.go:117] "RemoveContainer" containerID="a6512388461b5ae27e35163ae39ba5b6a1c60f287ae5af46bd5dc2ae88208f31" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.632337 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.649906 4932 scope.go:117] "RemoveContainer" containerID="86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a" Nov 25 09:13:13 crc kubenswrapper[4932]: E1125 09:13:13.651047 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a\": container with ID starting with 86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a not found: ID does not exist" containerID="86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.651095 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a"} err="failed to get container status \"86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a\": rpc error: code = NotFound desc = could not find container \"86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a\": container with ID starting with 86b15896a1eb6b53560d94b1fb53cfe17035fd1c75db50222c120e11c4b6a80a not found: ID does not exist" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.651127 4932 scope.go:117] "RemoveContainer" containerID="a6512388461b5ae27e35163ae39ba5b6a1c60f287ae5af46bd5dc2ae88208f31" Nov 25 09:13:13 crc kubenswrapper[4932]: E1125 09:13:13.652033 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6512388461b5ae27e35163ae39ba5b6a1c60f287ae5af46bd5dc2ae88208f31\": container with ID starting with a6512388461b5ae27e35163ae39ba5b6a1c60f287ae5af46bd5dc2ae88208f31 not found: ID does not exist" containerID="a6512388461b5ae27e35163ae39ba5b6a1c60f287ae5af46bd5dc2ae88208f31" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.652061 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6512388461b5ae27e35163ae39ba5b6a1c60f287ae5af46bd5dc2ae88208f31"} err="failed to get container status \"a6512388461b5ae27e35163ae39ba5b6a1c60f287ae5af46bd5dc2ae88208f31\": rpc error: code = NotFound desc = could not find container \"a6512388461b5ae27e35163ae39ba5b6a1c60f287ae5af46bd5dc2ae88208f31\": container with ID starting with a6512388461b5ae27e35163ae39ba5b6a1c60f287ae5af46bd5dc2ae88208f31 not found: ID does not exist" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.667839 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/969d317e-0787-44a8-8e27-554b0e887444-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.667879 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.755883 4932 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_ovn-northd-0_c9d818a0-17fd-44a2-8855-a6f847efe274/ovn-northd/0.log" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.758395 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.872736 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9d818a0-17fd-44a2-8855-a6f847efe274-scripts\") pod \"c9d818a0-17fd-44a2-8855-a6f847efe274\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.872789 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5bxn9\" (UniqueName: \"kubernetes.io/projected/c9d818a0-17fd-44a2-8855-a6f847efe274-kube-api-access-5bxn9\") pod \"c9d818a0-17fd-44a2-8855-a6f847efe274\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.872830 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9d818a0-17fd-44a2-8855-a6f847efe274-config\") pod \"c9d818a0-17fd-44a2-8855-a6f847efe274\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.872859 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-ovn-northd-tls-certs\") pod \"c9d818a0-17fd-44a2-8855-a6f847efe274\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.872909 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-combined-ca-bundle\") pod \"c9d818a0-17fd-44a2-8855-a6f847efe274\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.872952 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c9d818a0-17fd-44a2-8855-a6f847efe274-ovn-rundir\") pod \"c9d818a0-17fd-44a2-8855-a6f847efe274\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.872992 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-metrics-certs-tls-certs\") pod \"c9d818a0-17fd-44a2-8855-a6f847efe274\" (UID: \"c9d818a0-17fd-44a2-8855-a6f847efe274\") " Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.874105 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9d818a0-17fd-44a2-8855-a6f847efe274-config" (OuterVolumeSpecName: "config") pod "c9d818a0-17fd-44a2-8855-a6f847efe274" (UID: "c9d818a0-17fd-44a2-8855-a6f847efe274"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.874223 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9d818a0-17fd-44a2-8855-a6f847efe274-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "c9d818a0-17fd-44a2-8855-a6f847efe274" (UID: "c9d818a0-17fd-44a2-8855-a6f847efe274"). InnerVolumeSpecName "ovn-rundir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.874316 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9d818a0-17fd-44a2-8855-a6f847efe274-scripts" (OuterVolumeSpecName: "scripts") pod "c9d818a0-17fd-44a2-8855-a6f847efe274" (UID: "c9d818a0-17fd-44a2-8855-a6f847efe274"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.880251 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9d818a0-17fd-44a2-8855-a6f847efe274-kube-api-access-5bxn9" (OuterVolumeSpecName: "kube-api-access-5bxn9") pod "c9d818a0-17fd-44a2-8855-a6f847efe274" (UID: "c9d818a0-17fd-44a2-8855-a6f847efe274"). InnerVolumeSpecName "kube-api-access-5bxn9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.898812 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c9d818a0-17fd-44a2-8855-a6f847efe274" (UID: "c9d818a0-17fd-44a2-8855-a6f847efe274"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.940212 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "c9d818a0-17fd-44a2-8855-a6f847efe274" (UID: "c9d818a0-17fd-44a2-8855-a6f847efe274"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.952525 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "c9d818a0-17fd-44a2-8855-a6f847efe274" (UID: "c9d818a0-17fd-44a2-8855-a6f847efe274"). InnerVolumeSpecName "ovn-northd-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.974835 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.974874 4932 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c9d818a0-17fd-44a2-8855-a6f847efe274-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.974883 4932 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.974894 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9d818a0-17fd-44a2-8855-a6f847efe274-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.974903 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5bxn9\" (UniqueName: \"kubernetes.io/projected/c9d818a0-17fd-44a2-8855-a6f847efe274-kube-api-access-5bxn9\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.974911 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9d818a0-17fd-44a2-8855-a6f847efe274-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:13 crc kubenswrapper[4932]: I1125 09:13:13.974919 4932 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9d818a0-17fd-44a2-8855-a6f847efe274-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.124625 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.136479 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.229251 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-xn7j8"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.234362 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-xn7j8"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.249356 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placementb0bd-account-delete-vxbgw"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.251326 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-b0bd-account-create-nmsld"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.257511 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-b0bd-account-create-nmsld"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.259332 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placementb0bd-account-delete-vxbgw"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.271316 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.328682 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.368475 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-m77q8"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.380155 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrdct\" (UniqueName: \"kubernetes.io/projected/8134265d-9da9-4607-8db8-98330608ba4c-kube-api-access-rrdct\") pod \"8134265d-9da9-4607-8db8-98330608ba4c\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.380227 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8134265d-9da9-4607-8db8-98330608ba4c-galera-tls-certs\") pod \"8134265d-9da9-4607-8db8-98330608ba4c\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.380294 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8134265d-9da9-4607-8db8-98330608ba4c-combined-ca-bundle\") pod \"8134265d-9da9-4607-8db8-98330608ba4c\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.380316 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"8134265d-9da9-4607-8db8-98330608ba4c\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.380346 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d586a3b8-c6b8-4c6e-aa6f-11797966d218-combined-ca-bundle\") pod \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\" (UID: \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.380382 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d586a3b8-c6b8-4c6e-aa6f-11797966d218-config-data\") pod \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\" (UID: \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.380423 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znt4v\" (UniqueName: \"kubernetes.io/projected/d586a3b8-c6b8-4c6e-aa6f-11797966d218-kube-api-access-znt4v\") pod \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\" (UID: \"d586a3b8-c6b8-4c6e-aa6f-11797966d218\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.380454 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-kolla-config\") pod \"8134265d-9da9-4607-8db8-98330608ba4c\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.380499 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-operator-scripts\") pod \"8134265d-9da9-4607-8db8-98330608ba4c\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.380531 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8134265d-9da9-4607-8db8-98330608ba4c-config-data-generated\") pod \"8134265d-9da9-4607-8db8-98330608ba4c\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.380559 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-config-data-default\") pod \"8134265d-9da9-4607-8db8-98330608ba4c\" (UID: \"8134265d-9da9-4607-8db8-98330608ba4c\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.381352 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "8134265d-9da9-4607-8db8-98330608ba4c" (UID: "8134265d-9da9-4607-8db8-98330608ba4c"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.383920 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8134265d-9da9-4607-8db8-98330608ba4c-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "8134265d-9da9-4607-8db8-98330608ba4c" (UID: "8134265d-9da9-4607-8db8-98330608ba4c"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.384137 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8134265d-9da9-4607-8db8-98330608ba4c" (UID: "8134265d-9da9-4607-8db8-98330608ba4c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.384164 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "8134265d-9da9-4607-8db8-98330608ba4c" (UID: "8134265d-9da9-4607-8db8-98330608ba4c"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.395273 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d586a3b8-c6b8-4c6e-aa6f-11797966d218-kube-api-access-znt4v" (OuterVolumeSpecName: "kube-api-access-znt4v") pod "d586a3b8-c6b8-4c6e-aa6f-11797966d218" (UID: "d586a3b8-c6b8-4c6e-aa6f-11797966d218"). InnerVolumeSpecName "kube-api-access-znt4v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.395627 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8134265d-9da9-4607-8db8-98330608ba4c-kube-api-access-rrdct" (OuterVolumeSpecName: "kube-api-access-rrdct") pod "8134265d-9da9-4607-8db8-98330608ba4c" (UID: "8134265d-9da9-4607-8db8-98330608ba4c"). InnerVolumeSpecName "kube-api-access-rrdct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.399430 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-m77q8"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.405440 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-4147-account-create-dgzdd"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.417009 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "mysql-db") pod "8134265d-9da9-4607-8db8-98330608ba4c" (UID: "8134265d-9da9-4607-8db8-98330608ba4c"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.418704 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8134265d-9da9-4607-8db8-98330608ba4c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8134265d-9da9-4607-8db8-98330608ba4c" (UID: "8134265d-9da9-4607-8db8-98330608ba4c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.418759 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron4147-account-delete-qzg4q"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.435495 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-4147-account-create-dgzdd"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.440681 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d586a3b8-c6b8-4c6e-aa6f-11797966d218-config-data" (OuterVolumeSpecName: "config-data") pod "d586a3b8-c6b8-4c6e-aa6f-11797966d218" (UID: "d586a3b8-c6b8-4c6e-aa6f-11797966d218"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.440951 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron4147-account-delete-qzg4q"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.444027 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d586a3b8-c6b8-4c6e-aa6f-11797966d218-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d586a3b8-c6b8-4c6e-aa6f-11797966d218" (UID: "d586a3b8-c6b8-4c6e-aa6f-11797966d218"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.447687 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-29cl8"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.452719 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-29cl8"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.457705 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-71a7-account-create-tg6zv"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.459809 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8134265d-9da9-4607-8db8-98330608ba4c-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "8134265d-9da9-4607-8db8-98330608ba4c" (UID: "8134265d-9da9-4607-8db8-98330608ba4c"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.462399 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder71a7-account-delete-wslds"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.467236 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-71a7-account-create-tg6zv"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.471856 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder71a7-account-delete-wslds"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.482749 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8134265d-9da9-4607-8db8-98330608ba4c-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.482786 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.482795 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrdct\" (UniqueName: \"kubernetes.io/projected/8134265d-9da9-4607-8db8-98330608ba4c-kube-api-access-rrdct\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.482803 4932 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8134265d-9da9-4607-8db8-98330608ba4c-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.482812 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8134265d-9da9-4607-8db8-98330608ba4c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.482841 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.482852 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d586a3b8-c6b8-4c6e-aa6f-11797966d218-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.482861 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d586a3b8-c6b8-4c6e-aa6f-11797966d218-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.482870 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znt4v\" (UniqueName: \"kubernetes.io/projected/d586a3b8-c6b8-4c6e-aa6f-11797966d218-kube-api-access-znt4v\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.482882 4932 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.482893 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8134265d-9da9-4607-8db8-98330608ba4c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 
09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.502152 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.582739 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-lgz2l"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.583959 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.589533 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-lgz2l"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.598443 4932 generic.go:334] "Generic (PLEG): container finished" podID="8153c48a-65e5-4525-b3ca-4dba83d94681" containerID="cf9486063626577ad9657d77cfb72663e93d27944dae23244b9e36f69d66b24d" exitCode=0 Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.598536 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6fb96c5d7c-tsdlh" event={"ID":"8153c48a-65e5-4525-b3ca-4dba83d94681","Type":"ContainerDied","Data":"cf9486063626577ad9657d77cfb72663e93d27944dae23244b9e36f69d66b24d"} Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.617097 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c9d818a0-17fd-44a2-8855-a6f847efe274/ovn-northd/0.log" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.617752 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.618237 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31823923-9ce9-49e0-b4c1-42418d49918c" path="/var/lib/kubelet/pods/31823923-9ce9-49e0-b4c1-42418d49918c/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.619105 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62178c03-bbfb-4b80-b594-3507a4563e0b" path="/var/lib/kubelet/pods/62178c03-bbfb-4b80-b594-3507a4563e0b/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.624575 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="633c3722-e337-4b6a-98fe-451ac451dd06" path="/var/lib/kubelet/pods/633c3722-e337-4b6a-98fe-451ac451dd06/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.625144 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" path="/var/lib/kubelet/pods/90c30cef-5376-4f4a-8d59-9ab6daff902d/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.625766 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="948cc941-e4e1-4f79-80e1-c3a9594314fc" path="/var/lib/kubelet/pods/948cc941-e4e1-4f79-80e1-c3a9594314fc/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.627211 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="969d317e-0787-44a8-8e27-554b0e887444" path="/var/lib/kubelet/pods/969d317e-0787-44a8-8e27-554b0e887444/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.627868 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" path="/var/lib/kubelet/pods/9e365f51-6fe5-47b3-b183-5cf5cae5c65e/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.628867 4932 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a181c094-1cf9-42bd-b038-cc8a6f437aa3" path="/var/lib/kubelet/pods/a181c094-1cf9-42bd-b038-cc8a6f437aa3/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.630283 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4675049-b86a-4228-b9f1-c9112c3dd34e" path="/var/lib/kubelet/pods/a4675049-b86a-4228-b9f1-c9112c3dd34e/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.630803 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b57cfb59-e562-4fb2-bfad-b4cf5382c45a" path="/var/lib/kubelet/pods/b57cfb59-e562-4fb2-bfad-b4cf5382c45a/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.635137 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc7c8ac5-7063-4937-b0c9-9fcad5484c99" path="/var/lib/kubelet/pods/bc7c8ac5-7063-4937-b0c9-9fcad5484c99/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.636098 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" path="/var/lib/kubelet/pods/c1e12e22-8a2c-4093-b9c5-7cc68348e0ee/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.637100 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c205eefb-e35b-43b4-8288-a96280db4b43" path="/var/lib/kubelet/pods/c205eefb-e35b-43b4-8288-a96280db4b43/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.638528 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5101ae2-5106-48c7-9116-4c0e5ededb84" path="/var/lib/kubelet/pods/c5101ae2-5106-48c7-9116-4c0e5ededb84/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.639566 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7865402-5a21-44f9-9436-d5d1bab67a07" path="/var/lib/kubelet/pods/c7865402-5a21-44f9-9436-d5d1bab67a07/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.642996 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc680bc2-b240-40b6-b77e-c0d264f283b3" path="/var/lib/kubelet/pods/cc680bc2-b240-40b6-b77e-c0d264f283b3/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.644729 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce711acf-071a-4387-8c42-e2f3f8c25df9" path="/var/lib/kubelet/pods/ce711acf-071a-4387-8c42-e2f3f8c25df9/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.645416 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcf01003-0e2e-4a81-8be7-234708a1caf4" path="/var/lib/kubelet/pods/dcf01003-0e2e-4a81-8be7-234708a1caf4/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.646057 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e59aed73-dc42-4763-9c59-075e6206d38a" path="/var/lib/kubelet/pods/e59aed73-dc42-4763-9c59-075e6206d38a/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.648272 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f41b25a4-f48e-4938-9c23-0d89751af6ae" path="/var/lib/kubelet/pods/f41b25a4-f48e-4938-9c23-0d89751af6ae/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.650935 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" path="/var/lib/kubelet/pods/f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61/volumes" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.652575 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-northd-0" event={"ID":"c9d818a0-17fd-44a2-8855-a6f847efe274","Type":"ContainerDied","Data":"e3a0478c158f464f91ac01edc36339ead731fb9020f463add5e4c96c1931c20e"} Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.652612 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance5148-account-delete-fbhmq"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.652639 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-5148-account-create-jzfrs"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.652657 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-5148-account-create-jzfrs"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.652881 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance5148-account-delete-fbhmq" podUID="36140bfd-540f-40b6-8521-a8a3d408dc9d" containerName="mariadb-account-delete" containerID="cri-o://b3077aa432072fd3e5326ed9fb2a90716d917a8e76bd378c56a62bc655717477" gracePeriod=30 Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.653033 4932 scope.go:117] "RemoveContainer" containerID="2cf6819b94d62fccf47fe857c4bedbcb3672a422e4bfda3c5103104951af3ed6" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.664951 4932 generic.go:334] "Generic (PLEG): container finished" podID="8134265d-9da9-4607-8db8-98330608ba4c" containerID="1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541" exitCode=0 Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.665009 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"8134265d-9da9-4607-8db8-98330608ba4c","Type":"ContainerDied","Data":"1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541"} Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.665035 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"8134265d-9da9-4607-8db8-98330608ba4c","Type":"ContainerDied","Data":"3127d6ce8a76318742eeaa12b888412bcee21bd03ffca649914f7968657cb7a9"} Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.665091 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.676997 4932 generic.go:334] "Generic (PLEG): container finished" podID="186ced68-a489-410c-afa6-d4d623c37fc1" containerID="c2a6c3a1e0b539444b03c2c2b147c48f0e4e50e3895eb1146918d21fcc6cd271" exitCode=0 Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.677067 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"186ced68-a489-410c-afa6-d4d623c37fc1","Type":"ContainerDied","Data":"c2a6c3a1e0b539444b03c2c2b147c48f0e4e50e3895eb1146918d21fcc6cd271"} Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.680497 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-rbz2d"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.697483 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.699877 4932 generic.go:334] "Generic (PLEG): container finished" podID="d586a3b8-c6b8-4c6e-aa6f-11797966d218" containerID="9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863" exitCode=0 Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.699910 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"d586a3b8-c6b8-4c6e-aa6f-11797966d218","Type":"ContainerDied","Data":"9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863"} Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.699933 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"d586a3b8-c6b8-4c6e-aa6f-11797966d218","Type":"ContainerDied","Data":"2e1d2da76a36d06943a2765c3b2ee678c59cdcdf2b414703bff0876df2fc7415"} Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.700026 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.702539 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.709356 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-rbz2d"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.722449 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-8b4d-account-create-j47jt"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.737228 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-8b4d-account-create-j47jt"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.752306 4932 scope.go:117] "RemoveContainer" containerID="385bb0f63503360fe1dd3b8bc517012f4e561bae2dc4d40f0fb11f4b6501c4c1" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.757669 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican8b4d-account-delete-dprdr"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.757941 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican8b4d-account-delete-dprdr" podUID="dbc1ab9c-f494-4ce9-8758-d5c724e4413a" containerName="mariadb-account-delete" containerID="cri-o://c17d2b7a60e488f01f5b61b845be2ae08ecd0dcb78cfdb75a8b72ffb6d34fa27" gracePeriod=30 Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.788748 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.799791 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.805062 4932 scope.go:117] "RemoveContainer" containerID="1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.812337 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.827105 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.850603 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.857068 4932 scope.go:117] "RemoveContainer" containerID="659410f3e179673e519464b18c0db2561436e5f7e651c66d2f2dc7308a27ecbd" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.888905 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/186ced68-a489-410c-afa6-d4d623c37fc1-config-data\") pod \"186ced68-a489-410c-afa6-d4d623c37fc1\" (UID: \"186ced68-a489-410c-afa6-d4d623c37fc1\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.889093 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/186ced68-a489-410c-afa6-d4d623c37fc1-combined-ca-bundle\") pod \"186ced68-a489-410c-afa6-d4d623c37fc1\" (UID: \"186ced68-a489-410c-afa6-d4d623c37fc1\") " Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.889218 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w94hg\" (UniqueName: \"kubernetes.io/projected/186ced68-a489-410c-afa6-d4d623c37fc1-kube-api-access-w94hg\") pod \"186ced68-a489-410c-afa6-d4d623c37fc1\" (UID: \"186ced68-a489-410c-afa6-d4d623c37fc1\") " Nov 25 09:13:14 crc kubenswrapper[4932]: E1125 09:13:14.889715 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:14 crc kubenswrapper[4932]: E1125 09:13:14.889777 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts podName:838bc013-33ba-4722-be1d-b88c9016c83a nodeName:}" failed. No retries permitted until 2025-11-25 09:13:18.889757809 +0000 UTC m=+1459.015787372 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts") pod "novaapi7e0a-account-delete-drmkw" (UID: "838bc013-33ba-4722-be1d-b88c9016c83a") : configmap "openstack-scripts" not found Nov 25 09:13:14 crc kubenswrapper[4932]: E1125 09:13:14.890683 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:14 crc kubenswrapper[4932]: E1125 09:13:14.890718 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts podName:dbc1ab9c-f494-4ce9-8758-d5c724e4413a nodeName:}" failed. No retries permitted until 2025-11-25 09:13:18.890707196 +0000 UTC m=+1459.016736759 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts") pod "barbican8b4d-account-delete-dprdr" (UID: "dbc1ab9c-f494-4ce9-8758-d5c724e4413a") : configmap "openstack-scripts" not found Nov 25 09:13:14 crc kubenswrapper[4932]: E1125 09:13:14.890750 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:14 crc kubenswrapper[4932]: E1125 09:13:14.890777 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts podName:2dac2ddd-1d32-406d-bb47-cbcb0bd71b71 nodeName:}" failed. No retries permitted until 2025-11-25 09:13:18.890768688 +0000 UTC m=+1459.016798251 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts") pod "novacell0a937-account-delete-czmhb" (UID: "2dac2ddd-1d32-406d-bb47-cbcb0bd71b71") : configmap "openstack-scripts" not found Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.898741 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/186ced68-a489-410c-afa6-d4d623c37fc1-kube-api-access-w94hg" (OuterVolumeSpecName: "kube-api-access-w94hg") pod "186ced68-a489-410c-afa6-d4d623c37fc1" (UID: "186ced68-a489-410c-afa6-d4d623c37fc1"). InnerVolumeSpecName "kube-api-access-w94hg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.929173 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-q55gm"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.940167 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-q55gm"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.940546 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/186ced68-a489-410c-afa6-d4d623c37fc1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "186ced68-a489-410c-afa6-d4d623c37fc1" (UID: "186ced68-a489-410c-afa6-d4d623c37fc1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.943774 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/186ced68-a489-410c-afa6-d4d623c37fc1-config-data" (OuterVolumeSpecName: "config-data") pod "186ced68-a489-410c-afa6-d4d623c37fc1" (UID: "186ced68-a489-410c-afa6-d4d623c37fc1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.949663 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-7e0a-account-create-4tk2l"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.956079 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi7e0a-account-delete-drmkw"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.956388 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novaapi7e0a-account-delete-drmkw" podUID="838bc013-33ba-4722-be1d-b88c9016c83a" containerName="mariadb-account-delete" containerID="cri-o://9b476db280639f6d81d327b4980e19692d4945304938d0452dc6f72cbad0dc3c" gracePeriod=30 Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.961104 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-7e0a-account-create-4tk2l"] Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.990707 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w94hg\" (UniqueName: \"kubernetes.io/projected/186ced68-a489-410c-afa6-d4d623c37fc1-kube-api-access-w94hg\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.991097 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/186ced68-a489-410c-afa6-d4d623c37fc1-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: I1125 09:13:14.991112 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/186ced68-a489-410c-afa6-d4d623c37fc1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:14 crc kubenswrapper[4932]: E1125 09:13:14.991218 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:14 crc kubenswrapper[4932]: E1125 09:13:14.991275 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts podName:36140bfd-540f-40b6-8521-a8a3d408dc9d nodeName:}" failed. No retries permitted until 2025-11-25 09:13:18.991256263 +0000 UTC m=+1459.117285826 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts") pod "glance5148-account-delete-fbhmq" (UID: "36140bfd-540f-40b6-8521-a8a3d408dc9d") : configmap "openstack-scripts" not found Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.002750 4932 scope.go:117] "RemoveContainer" containerID="1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541" Nov 25 09:13:15 crc kubenswrapper[4932]: E1125 09:13:15.003217 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541\": container with ID starting with 1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541 not found: ID does not exist" containerID="1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.003261 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541"} err="failed to get container status \"1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541\": rpc error: code = NotFound desc = could not find container \"1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541\": container with ID starting with 1fec0c127d51823849622b6b92d1374eec492b38b47bc86f31a4c789a6d00541 not found: ID does not exist" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.003303 4932 scope.go:117] "RemoveContainer" containerID="659410f3e179673e519464b18c0db2561436e5f7e651c66d2f2dc7308a27ecbd" Nov 25 09:13:15 crc kubenswrapper[4932]: E1125 09:13:15.003648 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"659410f3e179673e519464b18c0db2561436e5f7e651c66d2f2dc7308a27ecbd\": container with ID starting with 659410f3e179673e519464b18c0db2561436e5f7e651c66d2f2dc7308a27ecbd not found: ID does not exist" containerID="659410f3e179673e519464b18c0db2561436e5f7e651c66d2f2dc7308a27ecbd" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.003670 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"659410f3e179673e519464b18c0db2561436e5f7e651c66d2f2dc7308a27ecbd"} err="failed to get container status \"659410f3e179673e519464b18c0db2561436e5f7e651c66d2f2dc7308a27ecbd\": rpc error: code = NotFound desc = could not find container \"659410f3e179673e519464b18c0db2561436e5f7e651c66d2f2dc7308a27ecbd\": container with ID starting with 659410f3e179673e519464b18c0db2561436e5f7e651c66d2f2dc7308a27ecbd not found: ID does not exist" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.003685 4932 scope.go:117] "RemoveContainer" containerID="9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.018487 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6fb96c5d7c-tsdlh" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.024723 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-5mlqd"] Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.028405 4932 scope.go:117] "RemoveContainer" containerID="9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863" Nov 25 09:13:15 crc kubenswrapper[4932]: E1125 09:13:15.031437 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863\": container with ID starting with 9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863 not found: ID does not exist" containerID="9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.031513 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863"} err="failed to get container status \"9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863\": rpc error: code = NotFound desc = could not find container \"9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863\": container with ID starting with 9dcf6da498e3d93cd5e527628db4c025cb266ef701da083d7dd47fea6a504863 not found: ID does not exist" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.040896 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-5mlqd"] Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.054901 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0a937-account-delete-czmhb"] Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.055146 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novacell0a937-account-delete-czmhb" podUID="2dac2ddd-1d32-406d-bb47-cbcb0bd71b71" containerName="mariadb-account-delete" containerID="cri-o://9b60a7120977a356bc3ba5ed856d8a02152e5c7b2b97a474909ee1d09c5d2fa5" gracePeriod=30 Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.073601 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-a937-account-create-d2hjq"] Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.081318 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-a937-account-create-d2hjq"] Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.091914 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-config-data\") pod \"8153c48a-65e5-4525-b3ca-4dba83d94681\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.091974 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-credential-keys\") pod \"8153c48a-65e5-4525-b3ca-4dba83d94681\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.092010 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-public-tls-certs\") pod \"8153c48a-65e5-4525-b3ca-4dba83d94681\" (UID: 
\"8153c48a-65e5-4525-b3ca-4dba83d94681\") " Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.092042 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-combined-ca-bundle\") pod \"8153c48a-65e5-4525-b3ca-4dba83d94681\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.092110 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-fernet-keys\") pod \"8153c48a-65e5-4525-b3ca-4dba83d94681\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.092128 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rcb97\" (UniqueName: \"kubernetes.io/projected/8153c48a-65e5-4525-b3ca-4dba83d94681-kube-api-access-rcb97\") pod \"8153c48a-65e5-4525-b3ca-4dba83d94681\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.092256 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-internal-tls-certs\") pod \"8153c48a-65e5-4525-b3ca-4dba83d94681\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.092278 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-scripts\") pod \"8153c48a-65e5-4525-b3ca-4dba83d94681\" (UID: \"8153c48a-65e5-4525-b3ca-4dba83d94681\") " Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.096661 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "8153c48a-65e5-4525-b3ca-4dba83d94681" (UID: "8153c48a-65e5-4525-b3ca-4dba83d94681"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.096702 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8153c48a-65e5-4525-b3ca-4dba83d94681-kube-api-access-rcb97" (OuterVolumeSpecName: "kube-api-access-rcb97") pod "8153c48a-65e5-4525-b3ca-4dba83d94681" (UID: "8153c48a-65e5-4525-b3ca-4dba83d94681"). InnerVolumeSpecName "kube-api-access-rcb97". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.098669 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "8153c48a-65e5-4525-b3ca-4dba83d94681" (UID: "8153c48a-65e5-4525-b3ca-4dba83d94681"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.109282 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-scripts" (OuterVolumeSpecName: "scripts") pod "8153c48a-65e5-4525-b3ca-4dba83d94681" (UID: "8153c48a-65e5-4525-b3ca-4dba83d94681"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.111288 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-config-data" (OuterVolumeSpecName: "config-data") pod "8153c48a-65e5-4525-b3ca-4dba83d94681" (UID: "8153c48a-65e5-4525-b3ca-4dba83d94681"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.113151 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8153c48a-65e5-4525-b3ca-4dba83d94681" (UID: "8153c48a-65e5-4525-b3ca-4dba83d94681"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.128009 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8153c48a-65e5-4525-b3ca-4dba83d94681" (UID: "8153c48a-65e5-4525-b3ca-4dba83d94681"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.139240 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8153c48a-65e5-4525-b3ca-4dba83d94681" (UID: "8153c48a-65e5-4525-b3ca-4dba83d94681"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.194315 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.194353 4932 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.194366 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rcb97\" (UniqueName: \"kubernetes.io/projected/8153c48a-65e5-4525-b3ca-4dba83d94681-kube-api-access-rcb97\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.194380 4932 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.194391 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.194403 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.194414 4932 reconciler_common.go:293] "Volume detached for volume 
\"credential-keys\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.194424 4932 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8153c48a-65e5-4525-b3ca-4dba83d94681-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:15 crc kubenswrapper[4932]: E1125 09:13:15.588202 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 25 09:13:15 crc kubenswrapper[4932]: E1125 09:13:15.588566 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 25 09:13:15 crc kubenswrapper[4932]: E1125 09:13:15.588841 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 25 09:13:15 crc kubenswrapper[4932]: E1125 09:13:15.588921 4932 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovsdb-server" Nov 25 09:13:15 crc kubenswrapper[4932]: E1125 09:13:15.589812 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 25 09:13:15 crc kubenswrapper[4932]: E1125 09:13:15.591208 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 25 09:13:15 crc kubenswrapper[4932]: E1125 09:13:15.592652 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 25 09:13:15 crc kubenswrapper[4932]: E1125 
Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.710881 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6fb96c5d7c-tsdlh" event={"ID":"8153c48a-65e5-4525-b3ca-4dba83d94681","Type":"ContainerDied","Data":"c326432da8ac46fcc278b4e68d6bf8adbdc33df2c2102fab16bf09e99eeb7ab6"}
Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.710952 4932 scope.go:117] "RemoveContainer" containerID="cf9486063626577ad9657d77cfb72663e93d27944dae23244b9e36f69d66b24d"
Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.710908 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6fb96c5d7c-tsdlh"
Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.715002 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"186ced68-a489-410c-afa6-d4d623c37fc1","Type":"ContainerDied","Data":"2c95ef3ee48e374fa43a0d30f685f4e1d459ccc6e3f652192c87c848ba65d5cb"}
Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.715014 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.745441 4932 scope.go:117] "RemoveContainer" containerID="c2a6c3a1e0b539444b03c2c2b147c48f0e4e50e3895eb1146918d21fcc6cd271"
Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.754383 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-6fb96c5d7c-tsdlh"]
Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.765981 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-6fb96c5d7c-tsdlh"]
Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.799266 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.804369 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.865700 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gxjgb"
Nov 25 09:13:15 crc kubenswrapper[4932]: I1125 09:13:15.932450 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gxjgb"
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.110912 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gxjgb"]
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.615896 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="186ced68-a489-410c-afa6-d4d623c37fc1" path="/var/lib/kubelet/pods/186ced68-a489-410c-afa6-d4d623c37fc1/volumes"
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.616932 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="283026d1-b94f-47c9-9a9e-3b85e009715a" path="/var/lib/kubelet/pods/283026d1-b94f-47c9-9a9e-3b85e009715a/volumes"
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.617599 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="339c14cf-c65c-41e7-a983-d16b36bf01ea" path="/var/lib/kubelet/pods/339c14cf-c65c-41e7-a983-d16b36bf01ea/volumes"
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.618375 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58597054-bb2c-440e-88cc-7f969e6ee0bb" path="/var/lib/kubelet/pods/58597054-bb2c-440e-88cc-7f969e6ee0bb/volumes"
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.619625 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="717d7a0f-3f58-404d-8ffc-c95f75ebd799" path="/var/lib/kubelet/pods/717d7a0f-3f58-404d-8ffc-c95f75ebd799/volumes"
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.620232 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8134265d-9da9-4607-8db8-98330608ba4c" path="/var/lib/kubelet/pods/8134265d-9da9-4607-8db8-98330608ba4c/volumes"
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.621170 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8153c48a-65e5-4525-b3ca-4dba83d94681" path="/var/lib/kubelet/pods/8153c48a-65e5-4525-b3ca-4dba83d94681/volumes"
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.621653 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afee3e89-74a3-4ad0-ac06-c5c97efa8543" path="/var/lib/kubelet/pods/afee3e89-74a3-4ad0-ac06-c5c97efa8543/volumes"
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.622159 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1" path="/var/lib/kubelet/pods/b7f9e8e7-0f9b-486f-a7fe-4e851a4145a1/volumes"
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.622703 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" path="/var/lib/kubelet/pods/c9d818a0-17fd-44a2-8855-a6f847efe274/volumes"
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.623686 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d586a3b8-c6b8-4c6e-aa6f-11797966d218" path="/var/lib/kubelet/pods/d586a3b8-c6b8-4c6e-aa6f-11797966d218/volumes"
Nov 25 09:13:16 crc kubenswrapper[4932]: I1125 09:13:16.624211 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc642fd3-edd5-4598-9eeb-06bdb9748b1a" path="/var/lib/kubelet/pods/fc642fd3-edd5-4598-9eeb-06bdb9748b1a/volumes"
Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.469062 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.532611 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-config-data\") pod \"90db5718-c185-4863-888a-6cb41ca5339d\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.532689 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-scripts\") pod \"90db5718-c185-4863-888a-6cb41ca5339d\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.532773 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-combined-ca-bundle\") pod \"90db5718-c185-4863-888a-6cb41ca5339d\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.532799 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-ceilometer-tls-certs\") pod \"90db5718-c185-4863-888a-6cb41ca5339d\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.532838 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/90db5718-c185-4863-888a-6cb41ca5339d-log-httpd\") pod \"90db5718-c185-4863-888a-6cb41ca5339d\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.532857 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/90db5718-c185-4863-888a-6cb41ca5339d-run-httpd\") pod \"90db5718-c185-4863-888a-6cb41ca5339d\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.532924 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g2v88\" (UniqueName: \"kubernetes.io/projected/90db5718-c185-4863-888a-6cb41ca5339d-kube-api-access-g2v88\") pod \"90db5718-c185-4863-888a-6cb41ca5339d\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.532948 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-sg-core-conf-yaml\") pod \"90db5718-c185-4863-888a-6cb41ca5339d\" (UID: \"90db5718-c185-4863-888a-6cb41ca5339d\") " Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.533575 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90db5718-c185-4863-888a-6cb41ca5339d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "90db5718-c185-4863-888a-6cb41ca5339d" (UID: "90db5718-c185-4863-888a-6cb41ca5339d"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.533828 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90db5718-c185-4863-888a-6cb41ca5339d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "90db5718-c185-4863-888a-6cb41ca5339d" (UID: "90db5718-c185-4863-888a-6cb41ca5339d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.546290 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-scripts" (OuterVolumeSpecName: "scripts") pod "90db5718-c185-4863-888a-6cb41ca5339d" (UID: "90db5718-c185-4863-888a-6cb41ca5339d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.555845 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90db5718-c185-4863-888a-6cb41ca5339d-kube-api-access-g2v88" (OuterVolumeSpecName: "kube-api-access-g2v88") pod "90db5718-c185-4863-888a-6cb41ca5339d" (UID: "90db5718-c185-4863-888a-6cb41ca5339d"). InnerVolumeSpecName "kube-api-access-g2v88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.579258 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "90db5718-c185-4863-888a-6cb41ca5339d" (UID: "90db5718-c185-4863-888a-6cb41ca5339d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.591665 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "90db5718-c185-4863-888a-6cb41ca5339d" (UID: "90db5718-c185-4863-888a-6cb41ca5339d"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.624087 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "90db5718-c185-4863-888a-6cb41ca5339d" (UID: "90db5718-c185-4863-888a-6cb41ca5339d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.634178 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.634223 4932 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.634234 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/90db5718-c185-4863-888a-6cb41ca5339d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.634241 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/90db5718-c185-4863-888a-6cb41ca5339d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.634251 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g2v88\" (UniqueName: \"kubernetes.io/projected/90db5718-c185-4863-888a-6cb41ca5339d-kube-api-access-g2v88\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.634261 4932 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.634268 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.652103 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-config-data" (OuterVolumeSpecName: "config-data") pod "90db5718-c185-4863-888a-6cb41ca5339d" (UID: "90db5718-c185-4863-888a-6cb41ca5339d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.735572 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90db5718-c185-4863-888a-6cb41ca5339d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.751655 4932 generic.go:334] "Generic (PLEG): container finished" podID="90db5718-c185-4863-888a-6cb41ca5339d" containerID="b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2" exitCode=0 Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.751748 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.751785 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"90db5718-c185-4863-888a-6cb41ca5339d","Type":"ContainerDied","Data":"b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2"} Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.751813 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"90db5718-c185-4863-888a-6cb41ca5339d","Type":"ContainerDied","Data":"9b3bbf21cc0cd1b693451b19c7b9b415c76cfc7281f20ceff7143d8d1ea4325a"} Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.751833 4932 scope.go:117] "RemoveContainer" containerID="44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.751888 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gxjgb" podUID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" containerName="registry-server" containerID="cri-o://30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270" gracePeriod=2 Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.799111 4932 scope.go:117] "RemoveContainer" containerID="5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.808409 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.818974 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.821422 4932 scope.go:117] "RemoveContainer" containerID="b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.859964 4932 scope.go:117] "RemoveContainer" containerID="d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.917180 4932 scope.go:117] "RemoveContainer" containerID="44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d" Nov 25 09:13:17 crc kubenswrapper[4932]: E1125 09:13:17.917872 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d\": container with ID starting with 44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d not found: ID does not exist" containerID="44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.917919 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d"} err="failed to get container status \"44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d\": rpc error: code = NotFound desc = could not find container \"44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d\": container with ID starting with 44451e9c9846d6f156274571b1e7d7b8c02278359d1e11e5256016601a927c4d not found: ID does not exist" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.917952 4932 scope.go:117] "RemoveContainer" containerID="5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028" Nov 25 09:13:17 crc kubenswrapper[4932]: E1125 09:13:17.918469 4932 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028\": container with ID starting with 5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028 not found: ID does not exist" containerID="5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.918548 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028"} err="failed to get container status \"5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028\": rpc error: code = NotFound desc = could not find container \"5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028\": container with ID starting with 5d137ec4d673f04fff3855add488c4d79374435c0aaddc74628c201f36bad028 not found: ID does not exist" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.918585 4932 scope.go:117] "RemoveContainer" containerID="b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2" Nov 25 09:13:17 crc kubenswrapper[4932]: E1125 09:13:17.920747 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2\": container with ID starting with b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2 not found: ID does not exist" containerID="b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.920984 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2"} err="failed to get container status \"b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2\": rpc error: code = NotFound desc = could not find container \"b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2\": container with ID starting with b1c4293c7fafc82fecb19fb64634d6b1943da3d31b188f0bbc6f11aee4b8a7f2 not found: ID does not exist" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.921018 4932 scope.go:117] "RemoveContainer" containerID="d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4" Nov 25 09:13:17 crc kubenswrapper[4932]: E1125 09:13:17.921544 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4\": container with ID starting with d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4 not found: ID does not exist" containerID="d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4" Nov 25 09:13:17 crc kubenswrapper[4932]: I1125 09:13:17.921583 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4"} err="failed to get container status \"d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4\": rpc error: code = NotFound desc = could not find container \"d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4\": container with ID starting with d5c84ba85d4e1782d5e165c3437eb621e81a36867962a063f2c26881f4c069b4 not found: ID does not exist" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.165108 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.243000 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84b6148f-b9bf-41ef-a1ba-c282f94882ee-catalog-content\") pod \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\" (UID: \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\") " Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.243408 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84b6148f-b9bf-41ef-a1ba-c282f94882ee-utilities\") pod \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\" (UID: \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\") " Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.243439 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9npt\" (UniqueName: \"kubernetes.io/projected/84b6148f-b9bf-41ef-a1ba-c282f94882ee-kube-api-access-z9npt\") pod \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\" (UID: \"84b6148f-b9bf-41ef-a1ba-c282f94882ee\") " Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.244786 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84b6148f-b9bf-41ef-a1ba-c282f94882ee-utilities" (OuterVolumeSpecName: "utilities") pod "84b6148f-b9bf-41ef-a1ba-c282f94882ee" (UID: "84b6148f-b9bf-41ef-a1ba-c282f94882ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.247916 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84b6148f-b9bf-41ef-a1ba-c282f94882ee-kube-api-access-z9npt" (OuterVolumeSpecName: "kube-api-access-z9npt") pod "84b6148f-b9bf-41ef-a1ba-c282f94882ee" (UID: "84b6148f-b9bf-41ef-a1ba-c282f94882ee"). InnerVolumeSpecName "kube-api-access-z9npt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.345607 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84b6148f-b9bf-41ef-a1ba-c282f94882ee-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.345648 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9npt\" (UniqueName: \"kubernetes.io/projected/84b6148f-b9bf-41ef-a1ba-c282f94882ee-kube-api-access-z9npt\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.350094 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84b6148f-b9bf-41ef-a1ba-c282f94882ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "84b6148f-b9bf-41ef-a1ba-c282f94882ee" (UID: "84b6148f-b9bf-41ef-a1ba-c282f94882ee"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.447159 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84b6148f-b9bf-41ef-a1ba-c282f94882ee-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.617125 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90db5718-c185-4863-888a-6cb41ca5339d" path="/var/lib/kubelet/pods/90db5718-c185-4863-888a-6cb41ca5339d/volumes" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.762617 4932 generic.go:334] "Generic (PLEG): container finished" podID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" containerID="30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270" exitCode=0 Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.762672 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gxjgb" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.762671 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjgb" event={"ID":"84b6148f-b9bf-41ef-a1ba-c282f94882ee","Type":"ContainerDied","Data":"30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270"} Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.762722 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjgb" event={"ID":"84b6148f-b9bf-41ef-a1ba-c282f94882ee","Type":"ContainerDied","Data":"615ae82b4ad7794a30e582433291f8eaf5d40716fe8b7598f7954cf40108bfa5"} Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.762749 4932 scope.go:117] "RemoveContainer" containerID="30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.785362 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gxjgb"] Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.791748 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gxjgb"] Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.799257 4932 scope.go:117] "RemoveContainer" containerID="312a0e5b311e554233c3015142912fa04dde42e203d28aadec4ab22b96d11220" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.818699 4932 scope.go:117] "RemoveContainer" containerID="f82c61ca49ceda5f978194d115d5b068d1ddf690d8be385dc6e6283a09b7e99d" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.842490 4932 scope.go:117] "RemoveContainer" containerID="30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270" Nov 25 09:13:18 crc kubenswrapper[4932]: E1125 09:13:18.843739 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270\": container with ID starting with 30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270 not found: ID does not exist" containerID="30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.843766 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270"} err="failed to get container status \"30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270\": rpc error: code = NotFound desc 
= could not find container \"30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270\": container with ID starting with 30aa278c4300a409f1f97cbf4ab5ed3b04228395b282cbde4965b88052d74270 not found: ID does not exist" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.843786 4932 scope.go:117] "RemoveContainer" containerID="312a0e5b311e554233c3015142912fa04dde42e203d28aadec4ab22b96d11220" Nov 25 09:13:18 crc kubenswrapper[4932]: E1125 09:13:18.844150 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"312a0e5b311e554233c3015142912fa04dde42e203d28aadec4ab22b96d11220\": container with ID starting with 312a0e5b311e554233c3015142912fa04dde42e203d28aadec4ab22b96d11220 not found: ID does not exist" containerID="312a0e5b311e554233c3015142912fa04dde42e203d28aadec4ab22b96d11220" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.844169 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"312a0e5b311e554233c3015142912fa04dde42e203d28aadec4ab22b96d11220"} err="failed to get container status \"312a0e5b311e554233c3015142912fa04dde42e203d28aadec4ab22b96d11220\": rpc error: code = NotFound desc = could not find container \"312a0e5b311e554233c3015142912fa04dde42e203d28aadec4ab22b96d11220\": container with ID starting with 312a0e5b311e554233c3015142912fa04dde42e203d28aadec4ab22b96d11220 not found: ID does not exist" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.844182 4932 scope.go:117] "RemoveContainer" containerID="f82c61ca49ceda5f978194d115d5b068d1ddf690d8be385dc6e6283a09b7e99d" Nov 25 09:13:18 crc kubenswrapper[4932]: E1125 09:13:18.844895 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f82c61ca49ceda5f978194d115d5b068d1ddf690d8be385dc6e6283a09b7e99d\": container with ID starting with f82c61ca49ceda5f978194d115d5b068d1ddf690d8be385dc6e6283a09b7e99d not found: ID does not exist" containerID="f82c61ca49ceda5f978194d115d5b068d1ddf690d8be385dc6e6283a09b7e99d" Nov 25 09:13:18 crc kubenswrapper[4932]: I1125 09:13:18.844916 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f82c61ca49ceda5f978194d115d5b068d1ddf690d8be385dc6e6283a09b7e99d"} err="failed to get container status \"f82c61ca49ceda5f978194d115d5b068d1ddf690d8be385dc6e6283a09b7e99d\": rpc error: code = NotFound desc = could not find container \"f82c61ca49ceda5f978194d115d5b068d1ddf690d8be385dc6e6283a09b7e99d\": container with ID starting with f82c61ca49ceda5f978194d115d5b068d1ddf690d8be385dc6e6283a09b7e99d not found: ID does not exist" Nov 25 09:13:18 crc kubenswrapper[4932]: E1125 09:13:18.956341 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:18 crc kubenswrapper[4932]: E1125 09:13:18.956421 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts podName:2dac2ddd-1d32-406d-bb47-cbcb0bd71b71 nodeName:}" failed. No retries permitted until 2025-11-25 09:13:26.956400694 +0000 UTC m=+1467.082430257 (durationBeforeRetry 8s). 
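All four account-delete pods are blocked on the same root cause reported here: the ConfigMap openstack/openstack-scripts no longer exists, so their "operator-scripts" volume can never be set up. One way to confirm that from a client, sketched with client-go (kubeconfig handling kept deliberately minimal):

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Load the default kubeconfig; adjust for in-cluster use as needed.
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        client, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // Fetch the ConfigMap the volume plugin is looking for.
        _, err = client.CoreV1().ConfigMaps("openstack").Get(context.TODO(), "openstack-scripts", metav1.GetOptions{})
        fmt.Println(err) // expected here: configmaps "openstack-scripts" not found
    }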
Nov 25 09:13:18 crc kubenswrapper[4932]: E1125 09:13:18.956505 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 25 09:13:18 crc kubenswrapper[4932]: E1125 09:13:18.956544 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 25 09:13:18 crc kubenswrapper[4932]: E1125 09:13:18.956622 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts podName:838bc013-33ba-4722-be1d-b88c9016c83a nodeName:}" failed. No retries permitted until 2025-11-25 09:13:26.95659914 +0000 UTC m=+1467.082628723 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts") pod "novaapi7e0a-account-delete-drmkw" (UID: "838bc013-33ba-4722-be1d-b88c9016c83a") : configmap "openstack-scripts" not found
Nov 25 09:13:18 crc kubenswrapper[4932]: E1125 09:13:18.956649 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts podName:dbc1ab9c-f494-4ce9-8758-d5c724e4413a nodeName:}" failed. No retries permitted until 2025-11-25 09:13:26.956638201 +0000 UTC m=+1467.082667784 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts") pod "barbican8b4d-account-delete-dprdr" (UID: "dbc1ab9c-f494-4ce9-8758-d5c724e4413a") : configmap "openstack-scripts" not found
Nov 25 09:13:19 crc kubenswrapper[4932]: E1125 09:13:19.058761 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 25 09:13:19 crc kubenswrapper[4932]: E1125 09:13:19.058826 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts podName:36140bfd-540f-40b6-8521-a8a3d408dc9d nodeName:}" failed. No retries permitted until 2025-11-25 09:13:27.058812285 +0000 UTC m=+1467.184841848 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts") pod "glance5148-account-delete-fbhmq" (UID: "36140bfd-540f-40b6-8521-a8a3d408dc9d") : configmap "openstack-scripts" not found
Nov 25 09:13:20 crc kubenswrapper[4932]: E1125 09:13:20.589146 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 25 09:13:20 crc kubenswrapper[4932]: E1125 09:13:20.589882 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 25 09:13:20 crc kubenswrapper[4932]: E1125 09:13:20.590155 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 25 09:13:20 crc kubenswrapper[4932]: E1125 09:13:20.590203 4932 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovsdb-server"
Nov 25 09:13:20 crc kubenswrapper[4932]: E1125 09:13:20.592877 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 25 09:13:20 crc kubenswrapper[4932]: E1125 09:13:20.595333 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 25 09:13:20 crc kubenswrapper[4932]: E1125 09:13:20.597494 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 25 09:13:20 crc kubenswrapper[4932]: E1125 09:13:20.597539 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovs-vswitchd"
Nov 25 09:13:20 crc kubenswrapper[4932]: I1125 09:13:20.615921 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" path="/var/lib/kubelet/pods/84b6148f-b9bf-41ef-a1ba-c282f94882ee/volumes"
Nov 25 09:13:25 crc kubenswrapper[4932]: E1125 09:13:25.591121 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 25 09:13:25 crc kubenswrapper[4932]: E1125 09:13:25.592237 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 25 09:13:25 crc kubenswrapper[4932]: E1125 09:13:25.592338 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 25 09:13:25 crc kubenswrapper[4932]: E1125 09:13:25.593075 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Nov 25 09:13:25 crc kubenswrapper[4932]: E1125 09:13:25.593148 4932 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovsdb-server"
Nov 25 09:13:25 crc kubenswrapper[4932]: E1125 09:13:25.593504 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 25 09:13:25 crc kubenswrapper[4932]: E1125 09:13:25.595477 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Nov 25 09:13:25 crc kubenswrapper[4932]: E1125 09:13:25.595524 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovs-vswitchd"
Nov 25 09:13:27 crc kubenswrapper[4932]: E1125 09:13:27.003286 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 25 09:13:27 crc kubenswrapper[4932]: E1125 09:13:27.003708 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts podName:dbc1ab9c-f494-4ce9-8758-d5c724e4413a nodeName:}" failed. No retries permitted until 2025-11-25 09:13:43.003681211 +0000 UTC m=+1483.129710814 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts") pod "barbican8b4d-account-delete-dprdr" (UID: "dbc1ab9c-f494-4ce9-8758-d5c724e4413a") : configmap "openstack-scripts" not found
Nov 25 09:13:27 crc kubenswrapper[4932]: E1125 09:13:27.004253 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 25 09:13:27 crc kubenswrapper[4932]: E1125 09:13:27.004307 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts podName:2dac2ddd-1d32-406d-bb47-cbcb0bd71b71 nodeName:}" failed. No retries permitted until 2025-11-25 09:13:43.004290879 +0000 UTC m=+1483.130320472 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts") pod "novacell0a937-account-delete-czmhb" (UID: "2dac2ddd-1d32-406d-bb47-cbcb0bd71b71") : configmap "openstack-scripts" not found
Nov 25 09:13:27 crc kubenswrapper[4932]: E1125 09:13:27.004352 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 25 09:13:27 crc kubenswrapper[4932]: E1125 09:13:27.004386 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts podName:838bc013-33ba-4722-be1d-b88c9016c83a nodeName:}" failed. No retries permitted until 2025-11-25 09:13:43.004374111 +0000 UTC m=+1483.130403704 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts") pod "novaapi7e0a-account-delete-drmkw" (UID: "838bc013-33ba-4722-be1d-b88c9016c83a") : configmap "openstack-scripts" not found
Nov 25 09:13:27 crc kubenswrapper[4932]: E1125 09:13:27.104514 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Nov 25 09:13:27 crc kubenswrapper[4932]: E1125 09:13:27.104646 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts podName:36140bfd-540f-40b6-8521-a8a3d408dc9d nodeName:}" failed. No retries permitted until 2025-11-25 09:13:43.104610669 +0000 UTC m=+1483.230640272 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts") pod "glance5148-account-delete-fbhmq" (UID: "36140bfd-540f-40b6-8521-a8a3d408dc9d") : configmap "openstack-scripts" not found
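Note the retry spacing for these mounts across the log: 4s at 09:13:14, 8s at 09:13:18, 16s at 09:13:27. That is the operation executor's doubling backoff; a minimal sketch of the pattern (the initial delay and cap here are assumptions, and kubelet's nestedpendingoperations differs in detail):

    package main

    import (
        "fmt"
        "time"
    )

    // nextBackoff doubles the delay up to a fixed cap, matching the
    // 4s -> 8s -> 16s progression visible in the entries above.
    func nextBackoff(current, max time.Duration) time.Duration {
        next := current * 2
        if next > max {
            return max
        }
        return next
    }

    func main() {
        d := 2 * time.Second // assumed starting delay for illustration
        for i := 0; i < 4; i++ {
            d = nextBackoff(d, 2*time.Minute)
            fmt.Println(d) // 4s, 8s, 16s, 32s
        }
    }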
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts") pod "glance5148-account-delete-fbhmq" (UID: "36140bfd-540f-40b6-8521-a8a3d408dc9d") : configmap "openstack-scripts" not found Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.445865 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.527013 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-config\") pod \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.527164 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-public-tls-certs\") pod \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.527234 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-combined-ca-bundle\") pod \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.527264 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-ovndb-tls-certs\") pod \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.527371 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-internal-tls-certs\") pod \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.527404 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-httpd-config\") pod \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.527472 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhnks\" (UniqueName: \"kubernetes.io/projected/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-kube-api-access-xhnks\") pod \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\" (UID: \"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5\") " Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.535131 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" (UID: "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.536263 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-kube-api-access-xhnks" (OuterVolumeSpecName: "kube-api-access-xhnks") pod "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" (UID: "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5"). InnerVolumeSpecName "kube-api-access-xhnks". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.585272 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-config" (OuterVolumeSpecName: "config") pod "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" (UID: "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.590088 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" (UID: "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.605451 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" (UID: "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.607982 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" (UID: "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.628980 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhnks\" (UniqueName: \"kubernetes.io/projected/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-kube-api-access-xhnks\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.629023 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.629043 4932 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.629060 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.629077 4932 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.629094 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.631019 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" (UID: "d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.731531 4932 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.874585 4932 generic.go:334] "Generic (PLEG): container finished" podID="d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" containerID="ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997" exitCode=0 Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.874639 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5584db9bdf-rzbj9" event={"ID":"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5","Type":"ContainerDied","Data":"ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997"} Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.874665 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5584db9bdf-rzbj9" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.874691 4932 scope.go:117] "RemoveContainer" containerID="674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.874677 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5584db9bdf-rzbj9" event={"ID":"d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5","Type":"ContainerDied","Data":"3144248aa7e9b43532c2adb1175327527d7a9769c5d0faf83f42c181dafe5e0b"} Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.899919 4932 scope.go:117] "RemoveContainer" containerID="ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.909651 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5584db9bdf-rzbj9"] Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.916233 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5584db9bdf-rzbj9"] Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.921043 4932 scope.go:117] "RemoveContainer" containerID="674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47" Nov 25 09:13:28 crc kubenswrapper[4932]: E1125 09:13:28.921496 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47\": container with ID starting with 674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47 not found: ID does not exist" containerID="674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.921538 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47"} err="failed to get container status \"674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47\": rpc error: code = NotFound desc = could not find container \"674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47\": container with ID starting with 674dafea672ac56b3500e69ac76a49ed8e82d000a13efe25806964b677308b47 not found: ID does not exist" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.921564 4932 scope.go:117] "RemoveContainer" containerID="ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997" Nov 25 09:13:28 crc kubenswrapper[4932]: E1125 09:13:28.921844 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997\": container with ID starting with ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997 not found: ID does not exist" containerID="ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997" Nov 25 09:13:28 crc kubenswrapper[4932]: I1125 09:13:28.921889 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997"} err="failed to get container status \"ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997\": rpc error: code = NotFound desc = could not find container \"ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997\": container with ID starting with ba68ba734dd18b157dd6c1b55d4df42b267cec1e9022f424daf6d64877816997 not found: ID does not exist" Nov 25 09:13:30 crc 
kubenswrapper[4932]: E1125 09:13:30.589719 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 25 09:13:30 crc kubenswrapper[4932]: E1125 09:13:30.590478 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 25 09:13:30 crc kubenswrapper[4932]: E1125 09:13:30.590945 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 25 09:13:30 crc kubenswrapper[4932]: E1125 09:13:30.591025 4932 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovsdb-server" Nov 25 09:13:30 crc kubenswrapper[4932]: E1125 09:13:30.591916 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 25 09:13:30 crc kubenswrapper[4932]: E1125 09:13:30.593893 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 25 09:13:30 crc kubenswrapper[4932]: E1125 09:13:30.596473 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 25 09:13:30 crc kubenswrapper[4932]: E1125 09:13:30.596550 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovs-vswitchd" Nov 25 09:13:30 crc kubenswrapper[4932]: I1125 09:13:30.620113 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" path="/var/lib/kubelet/pods/d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5/volumes" Nov 25 09:13:35 crc kubenswrapper[4932]: E1125 09:13:35.588490 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 25 09:13:35 crc kubenswrapper[4932]: E1125 09:13:35.589577 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 25 09:13:35 crc kubenswrapper[4932]: E1125 09:13:35.590031 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 25 09:13:35 crc kubenswrapper[4932]: E1125 09:13:35.590108 4932 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-drcqj" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovsdb-server" Nov 25 09:13:35 crc kubenswrapper[4932]: E1125 09:13:35.591318 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 25 09:13:35 crc kubenswrapper[4932]: E1125 09:13:35.593724 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 25 09:13:35 crc kubenswrapper[4932]: E1125 09:13:35.595891 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 25 09:13:35 crc kubenswrapper[4932]: E1125 09:13:35.595941 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-drcqj" 
podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovs-vswitchd" Nov 25 09:13:35 crc kubenswrapper[4932]: I1125 09:13:35.985770 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-drcqj_257c86ab-2577-4d46-bdb3-1ec56da0d21e/ovs-vswitchd/0.log" Nov 25 09:13:35 crc kubenswrapper[4932]: I1125 09:13:35.999295 4932 generic.go:334] "Generic (PLEG): container finished" podID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" exitCode=137 Nov 25 09:13:35 crc kubenswrapper[4932]: I1125 09:13:35.999387 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-drcqj" event={"ID":"257c86ab-2577-4d46-bdb3-1ec56da0d21e","Type":"ContainerDied","Data":"18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16"} Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.021024 4932 generic.go:334] "Generic (PLEG): container finished" podID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerID="a9a48f9fe27c63900394a2e67fd1df3228736d3ba3410cb4defb99fc16d721f1" exitCode=137 Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.021081 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"a9a48f9fe27c63900394a2e67fd1df3228736d3ba3410cb4defb99fc16d721f1"} Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.040819 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.172740 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skkg2\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-kube-api-access-skkg2\") pod \"81ccee4a-f414-4007-ae17-b440b55dea5f\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.172844 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/81ccee4a-f414-4007-ae17-b440b55dea5f-cache\") pod \"81ccee4a-f414-4007-ae17-b440b55dea5f\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.172941 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift\") pod \"81ccee4a-f414-4007-ae17-b440b55dea5f\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.172997 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"81ccee4a-f414-4007-ae17-b440b55dea5f\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.173090 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/81ccee4a-f414-4007-ae17-b440b55dea5f-lock\") pod \"81ccee4a-f414-4007-ae17-b440b55dea5f\" (UID: \"81ccee4a-f414-4007-ae17-b440b55dea5f\") " Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.174540 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81ccee4a-f414-4007-ae17-b440b55dea5f-lock" (OuterVolumeSpecName: "lock") pod 
"81ccee4a-f414-4007-ae17-b440b55dea5f" (UID: "81ccee4a-f414-4007-ae17-b440b55dea5f"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.177484 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/81ccee4a-f414-4007-ae17-b440b55dea5f-cache" (OuterVolumeSpecName: "cache") pod "81ccee4a-f414-4007-ae17-b440b55dea5f" (UID: "81ccee4a-f414-4007-ae17-b440b55dea5f"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.190334 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "swift") pod "81ccee4a-f414-4007-ae17-b440b55dea5f" (UID: "81ccee4a-f414-4007-ae17-b440b55dea5f"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.190693 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-kube-api-access-skkg2" (OuterVolumeSpecName: "kube-api-access-skkg2") pod "81ccee4a-f414-4007-ae17-b440b55dea5f" (UID: "81ccee4a-f414-4007-ae17-b440b55dea5f"). InnerVolumeSpecName "kube-api-access-skkg2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.191212 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "81ccee4a-f414-4007-ae17-b440b55dea5f" (UID: "81ccee4a-f414-4007-ae17-b440b55dea5f"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.240433 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-drcqj_257c86ab-2577-4d46-bdb3-1ec56da0d21e/ovs-vswitchd/0.log" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.241258 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.275176 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skkg2\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-kube-api-access-skkg2\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.275223 4932 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/81ccee4a-f414-4007-ae17-b440b55dea5f-cache\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.275236 4932 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/81ccee4a-f414-4007-ae17-b440b55dea5f-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.275266 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.275276 4932 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/81ccee4a-f414-4007-ae17-b440b55dea5f-lock\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.301996 4932 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.375999 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftg54\" (UniqueName: \"kubernetes.io/projected/257c86ab-2577-4d46-bdb3-1ec56da0d21e-kube-api-access-ftg54\") pod \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.376105 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-etc-ovs\") pod \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.376217 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-lib\") pod \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.376273 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-run\") pod \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.376332 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/257c86ab-2577-4d46-bdb3-1ec56da0d21e-scripts\") pod \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.376378 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: 
\"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-log\") pod \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\" (UID: \"257c86ab-2577-4d46-bdb3-1ec56da0d21e\") " Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.376886 4932 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.376971 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-log" (OuterVolumeSpecName: "var-log") pod "257c86ab-2577-4d46-bdb3-1ec56da0d21e" (UID: "257c86ab-2577-4d46-bdb3-1ec56da0d21e"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.377427 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-lib" (OuterVolumeSpecName: "var-lib") pod "257c86ab-2577-4d46-bdb3-1ec56da0d21e" (UID: "257c86ab-2577-4d46-bdb3-1ec56da0d21e"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.377517 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "257c86ab-2577-4d46-bdb3-1ec56da0d21e" (UID: "257c86ab-2577-4d46-bdb3-1ec56da0d21e"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.377570 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-run" (OuterVolumeSpecName: "var-run") pod "257c86ab-2577-4d46-bdb3-1ec56da0d21e" (UID: "257c86ab-2577-4d46-bdb3-1ec56da0d21e"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.379808 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/257c86ab-2577-4d46-bdb3-1ec56da0d21e-scripts" (OuterVolumeSpecName: "scripts") pod "257c86ab-2577-4d46-bdb3-1ec56da0d21e" (UID: "257c86ab-2577-4d46-bdb3-1ec56da0d21e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.380209 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/257c86ab-2577-4d46-bdb3-1ec56da0d21e-kube-api-access-ftg54" (OuterVolumeSpecName: "kube-api-access-ftg54") pod "257c86ab-2577-4d46-bdb3-1ec56da0d21e" (UID: "257c86ab-2577-4d46-bdb3-1ec56da0d21e"). InnerVolumeSpecName "kube-api-access-ftg54". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.478440 4932 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-lib\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.478473 4932 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.478482 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/257c86ab-2577-4d46-bdb3-1ec56da0d21e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.478490 4932 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-var-log\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.478500 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftg54\" (UniqueName: \"kubernetes.io/projected/257c86ab-2577-4d46-bdb3-1ec56da0d21e-kube-api-access-ftg54\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:36 crc kubenswrapper[4932]: I1125 09:13:36.478508 4932 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/257c86ab-2577-4d46-bdb3-1ec56da0d21e-etc-ovs\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.038702 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-drcqj_257c86ab-2577-4d46-bdb3-1ec56da0d21e/ovs-vswitchd/0.log" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.039908 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-drcqj" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.040428 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-drcqj" event={"ID":"257c86ab-2577-4d46-bdb3-1ec56da0d21e","Type":"ContainerDied","Data":"03fd4299e59567672b47d27cf05ce679f7e2241bbc02e1ffda50ff705571054d"} Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.040509 4932 scope.go:117] "RemoveContainer" containerID="18304cb0e1cf82f2f033885b601f150f43213e7c92e2b4ac76d42b2818fa5d16" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.057767 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"81ccee4a-f414-4007-ae17-b440b55dea5f","Type":"ContainerDied","Data":"c81103b32956702122f1da404fdef6de8f7bedd7bcba09ad78af06eea03a4c2e"} Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.057953 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.064247 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-drcqj"] Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.070567 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-drcqj"] Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.080412 4932 scope.go:117] "RemoveContainer" containerID="120afe2bb607d9bc58811e218a403d37513c0c64339910ca60c51afa8cb91f99" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.083632 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.090704 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.098778 4932 scope.go:117] "RemoveContainer" containerID="b3d7606c018f09548171e8fb637356b80b8ace12213f992a7cbed584f9881e91" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.120077 4932 scope.go:117] "RemoveContainer" containerID="5ae59b9454a13af8d9b95946eddb65b72cf6eb58ca8f5a5c793ecae3ee358a2f" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.140251 4932 scope.go:117] "RemoveContainer" containerID="8418a97dfcdcedf4b5696213eb9548d1ede0f2e23cfc955f8dc8202263735b8a" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.155535 4932 scope.go:117] "RemoveContainer" containerID="568484e69b9c4127e15f009bf0e5694d15ca4b6ae5a35b7503f084c9adb3e9a3" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.173891 4932 scope.go:117] "RemoveContainer" containerID="d77ea0e7a1509cc988fded84ce9cd4dc66e884a9b6f07ad09301588d2897762e" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.181627 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.181681 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.194102 4932 scope.go:117] "RemoveContainer" containerID="a9a48f9fe27c63900394a2e67fd1df3228736d3ba3410cb4defb99fc16d721f1" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.216306 4932 scope.go:117] "RemoveContainer" containerID="78edf79de3cfd571e1fec0bd599680cc34a039f29b8fc703497738f0cf348ad8" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.238489 4932 scope.go:117] "RemoveContainer" containerID="07e4106840372eb90e9a0d57a59631587d0bde7ac43d138cc5d5ec8a10885a84" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.266068 4932 scope.go:117] "RemoveContainer" containerID="06eca31abe1c59ab7a8cb701e4a49245773b3f648c29cdbe3b219489466d8705" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.294646 4932 scope.go:117] "RemoveContainer" containerID="5a51256b52321de55e8f314f90f9c0ba2bb1175fd2f75c69c2bef2451a36ec18" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.316365 4932 scope.go:117] "RemoveContainer" 
containerID="0a8e07a6bdc220d1412d97c2b357bdc14500c61e6d93f00a0915d134c315a151" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.382523 4932 scope.go:117] "RemoveContainer" containerID="0e9f1ea09136d57750420bc0ce46abbfd67cd0b1239ce71468be11a57e791720" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.402772 4932 scope.go:117] "RemoveContainer" containerID="a45a7147f788b51504a339167827aa53fdb2e4a2d35f004cd41d1718e61f00a0" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.421238 4932 scope.go:117] "RemoveContainer" containerID="8d2c29a0a166c6ebbf9113b41e4e2ba9e248c36be93456a40622f2d5fcc2066e" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.440882 4932 scope.go:117] "RemoveContainer" containerID="a5d5ffb7d109b7b5eac7b58236b7451c24c1c17dca222fe9b3d425e19a748cf5" Nov 25 09:13:37 crc kubenswrapper[4932]: I1125 09:13:37.462090 4932 scope.go:117] "RemoveContainer" containerID="00edaf8b62c16ee50bbf819b1838d3ce3fd0a27605f5823b0347afa99c531c70" Nov 25 09:13:38 crc kubenswrapper[4932]: I1125 09:13:38.624067 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" path="/var/lib/kubelet/pods/257c86ab-2577-4d46-bdb3-1ec56da0d21e/volumes" Nov 25 09:13:38 crc kubenswrapper[4932]: I1125 09:13:38.626436 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" path="/var/lib/kubelet/pods/81ccee4a-f414-4007-ae17-b440b55dea5f/volumes" Nov 25 09:13:43 crc kubenswrapper[4932]: E1125 09:13:43.006167 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:43 crc kubenswrapper[4932]: E1125 09:13:43.006648 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts podName:2dac2ddd-1d32-406d-bb47-cbcb0bd71b71 nodeName:}" failed. No retries permitted until 2025-11-25 09:14:15.006618486 +0000 UTC m=+1515.132648089 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts") pod "novacell0a937-account-delete-czmhb" (UID: "2dac2ddd-1d32-406d-bb47-cbcb0bd71b71") : configmap "openstack-scripts" not found Nov 25 09:13:43 crc kubenswrapper[4932]: E1125 09:13:43.006203 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:43 crc kubenswrapper[4932]: E1125 09:13:43.006813 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts podName:dbc1ab9c-f494-4ce9-8758-d5c724e4413a nodeName:}" failed. No retries permitted until 2025-11-25 09:14:15.006782111 +0000 UTC m=+1515.132811714 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts") pod "barbican8b4d-account-delete-dprdr" (UID: "dbc1ab9c-f494-4ce9-8758-d5c724e4413a") : configmap "openstack-scripts" not found Nov 25 09:13:43 crc kubenswrapper[4932]: E1125 09:13:43.006240 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:43 crc kubenswrapper[4932]: E1125 09:13:43.006870 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts podName:838bc013-33ba-4722-be1d-b88c9016c83a nodeName:}" failed. No retries permitted until 2025-11-25 09:14:15.006857113 +0000 UTC m=+1515.132886716 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts") pod "novaapi7e0a-account-delete-drmkw" (UID: "838bc013-33ba-4722-be1d-b88c9016c83a") : configmap "openstack-scripts" not found Nov 25 09:13:43 crc kubenswrapper[4932]: E1125 09:13:43.108138 4932 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 25 09:13:43 crc kubenswrapper[4932]: E1125 09:13:43.108272 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts podName:36140bfd-540f-40b6-8521-a8a3d408dc9d nodeName:}" failed. No retries permitted until 2025-11-25 09:14:15.108244674 +0000 UTC m=+1515.234274247 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts") pod "glance5148-account-delete-fbhmq" (UID: "36140bfd-540f-40b6-8521-a8a3d408dc9d") : configmap "openstack-scripts" not found Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.177794 4932 generic.go:334] "Generic (PLEG): container finished" podID="838bc013-33ba-4722-be1d-b88c9016c83a" containerID="9b476db280639f6d81d327b4980e19692d4945304938d0452dc6f72cbad0dc3c" exitCode=137 Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.177955 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi7e0a-account-delete-drmkw" event={"ID":"838bc013-33ba-4722-be1d-b88c9016c83a","Type":"ContainerDied","Data":"9b476db280639f6d81d327b4980e19692d4945304938d0452dc6f72cbad0dc3c"} Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.179550 4932 generic.go:334] "Generic (PLEG): container finished" podID="2dac2ddd-1d32-406d-bb47-cbcb0bd71b71" containerID="9b60a7120977a356bc3ba5ed856d8a02152e5c7b2b97a474909ee1d09c5d2fa5" exitCode=137 Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.179598 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0a937-account-delete-czmhb" event={"ID":"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71","Type":"ContainerDied","Data":"9b60a7120977a356bc3ba5ed856d8a02152e5c7b2b97a474909ee1d09c5d2fa5"} Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.180735 4932 generic.go:334] "Generic (PLEG): container finished" podID="dbc1ab9c-f494-4ce9-8758-d5c724e4413a" containerID="c17d2b7a60e488f01f5b61b845be2ae08ecd0dcb78cfdb75a8b72ffb6d34fa27" exitCode=137 Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.180820 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican8b4d-account-delete-dprdr" 
event={"ID":"dbc1ab9c-f494-4ce9-8758-d5c724e4413a","Type":"ContainerDied","Data":"c17d2b7a60e488f01f5b61b845be2ae08ecd0dcb78cfdb75a8b72ffb6d34fa27"} Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.180878 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican8b4d-account-delete-dprdr" event={"ID":"dbc1ab9c-f494-4ce9-8758-d5c724e4413a","Type":"ContainerDied","Data":"20483f875de433df9cd085258b11bc2dcdce538a7d297cd953e26322a9c7d47d"} Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.180906 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20483f875de433df9cd085258b11bc2dcdce538a7d297cd953e26322a9c7d47d" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.181918 4932 generic.go:334] "Generic (PLEG): container finished" podID="36140bfd-540f-40b6-8521-a8a3d408dc9d" containerID="b3077aa432072fd3e5326ed9fb2a90716d917a8e76bd378c56a62bc655717477" exitCode=137 Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.181943 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance5148-account-delete-fbhmq" event={"ID":"36140bfd-540f-40b6-8521-a8a3d408dc9d","Type":"ContainerDied","Data":"b3077aa432072fd3e5326ed9fb2a90716d917a8e76bd378c56a62bc655717477"} Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.181964 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance5148-account-delete-fbhmq" event={"ID":"36140bfd-540f-40b6-8521-a8a3d408dc9d","Type":"ContainerDied","Data":"9da538ff24693cc88be6d797f79f0c525f21d6f780f1b556685c1ae89c18e1f3"} Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.181975 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9da538ff24693cc88be6d797f79f0c525f21d6f780f1b556685c1ae89c18e1f3" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.185712 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance5148-account-delete-fbhmq" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.192288 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican8b4d-account-delete-dprdr" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.244814 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts\") pod \"dbc1ab9c-f494-4ce9-8758-d5c724e4413a\" (UID: \"dbc1ab9c-f494-4ce9-8758-d5c724e4413a\") " Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.245283 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vvr7j\" (UniqueName: \"kubernetes.io/projected/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-kube-api-access-vvr7j\") pod \"dbc1ab9c-f494-4ce9-8758-d5c724e4413a\" (UID: \"dbc1ab9c-f494-4ce9-8758-d5c724e4413a\") " Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.245371 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kcmd\" (UniqueName: \"kubernetes.io/projected/36140bfd-540f-40b6-8521-a8a3d408dc9d-kube-api-access-5kcmd\") pod \"36140bfd-540f-40b6-8521-a8a3d408dc9d\" (UID: \"36140bfd-540f-40b6-8521-a8a3d408dc9d\") " Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.245513 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts\") pod \"36140bfd-540f-40b6-8521-a8a3d408dc9d\" (UID: \"36140bfd-540f-40b6-8521-a8a3d408dc9d\") " Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.245679 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dbc1ab9c-f494-4ce9-8758-d5c724e4413a" (UID: "dbc1ab9c-f494-4ce9-8758-d5c724e4413a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.246383 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.246783 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "36140bfd-540f-40b6-8521-a8a3d408dc9d" (UID: "36140bfd-540f-40b6-8521-a8a3d408dc9d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.252574 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-kube-api-access-vvr7j" (OuterVolumeSpecName: "kube-api-access-vvr7j") pod "dbc1ab9c-f494-4ce9-8758-d5c724e4413a" (UID: "dbc1ab9c-f494-4ce9-8758-d5c724e4413a"). InnerVolumeSpecName "kube-api-access-vvr7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.252622 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36140bfd-540f-40b6-8521-a8a3d408dc9d-kube-api-access-5kcmd" (OuterVolumeSpecName: "kube-api-access-5kcmd") pod "36140bfd-540f-40b6-8521-a8a3d408dc9d" (UID: "36140bfd-540f-40b6-8521-a8a3d408dc9d"). 
InnerVolumeSpecName "kube-api-access-5kcmd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.265137 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi7e0a-account-delete-drmkw" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.347398 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hj78\" (UniqueName: \"kubernetes.io/projected/838bc013-33ba-4722-be1d-b88c9016c83a-kube-api-access-5hj78\") pod \"838bc013-33ba-4722-be1d-b88c9016c83a\" (UID: \"838bc013-33ba-4722-be1d-b88c9016c83a\") " Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.347481 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts\") pod \"838bc013-33ba-4722-be1d-b88c9016c83a\" (UID: \"838bc013-33ba-4722-be1d-b88c9016c83a\") " Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.347767 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/36140bfd-540f-40b6-8521-a8a3d408dc9d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.347782 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vvr7j\" (UniqueName: \"kubernetes.io/projected/dbc1ab9c-f494-4ce9-8758-d5c724e4413a-kube-api-access-vvr7j\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.347793 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kcmd\" (UniqueName: \"kubernetes.io/projected/36140bfd-540f-40b6-8521-a8a3d408dc9d-kube-api-access-5kcmd\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.348553 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "838bc013-33ba-4722-be1d-b88c9016c83a" (UID: "838bc013-33ba-4722-be1d-b88c9016c83a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.349827 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/838bc013-33ba-4722-be1d-b88c9016c83a-kube-api-access-5hj78" (OuterVolumeSpecName: "kube-api-access-5hj78") pod "838bc013-33ba-4722-be1d-b88c9016c83a" (UID: "838bc013-33ba-4722-be1d-b88c9016c83a"). InnerVolumeSpecName "kube-api-access-5hj78". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.379710 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell0a937-account-delete-czmhb" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.448243 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts\") pod \"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71\" (UID: \"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71\") " Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.448415 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndhwk\" (UniqueName: \"kubernetes.io/projected/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-kube-api-access-ndhwk\") pod \"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71\" (UID: \"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71\") " Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.448707 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hj78\" (UniqueName: \"kubernetes.io/projected/838bc013-33ba-4722-be1d-b88c9016c83a-kube-api-access-5hj78\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.448744 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/838bc013-33ba-4722-be1d-b88c9016c83a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.449624 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2dac2ddd-1d32-406d-bb47-cbcb0bd71b71" (UID: "2dac2ddd-1d32-406d-bb47-cbcb0bd71b71"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.454717 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-kube-api-access-ndhwk" (OuterVolumeSpecName: "kube-api-access-ndhwk") pod "2dac2ddd-1d32-406d-bb47-cbcb0bd71b71" (UID: "2dac2ddd-1d32-406d-bb47-cbcb0bd71b71"). InnerVolumeSpecName "kube-api-access-ndhwk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.549974 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ndhwk\" (UniqueName: \"kubernetes.io/projected/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-kube-api-access-ndhwk\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:45 crc kubenswrapper[4932]: I1125 09:13:45.550029 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.198275 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi7e0a-account-delete-drmkw" Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.198269 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi7e0a-account-delete-drmkw" event={"ID":"838bc013-33ba-4722-be1d-b88c9016c83a","Type":"ContainerDied","Data":"6ccbe14fa5d6adc329e2bc92ddd3490d42d4b5b61e6ac29bbebcbaafa5b962e2"} Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.199067 4932 scope.go:117] "RemoveContainer" containerID="9b476db280639f6d81d327b4980e19692d4945304938d0452dc6f72cbad0dc3c" Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.202328 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican8b4d-account-delete-dprdr" Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.202610 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0a937-account-delete-czmhb" Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.202638 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance5148-account-delete-fbhmq" Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.202377 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0a937-account-delete-czmhb" event={"ID":"2dac2ddd-1d32-406d-bb47-cbcb0bd71b71","Type":"ContainerDied","Data":"73ab8f53236afbf1b63ad6d5ee094552e53704f8d58923766e8b1674e1978978"} Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.267463 4932 scope.go:117] "RemoveContainer" containerID="9b60a7120977a356bc3ba5ed856d8a02152e5c7b2b97a474909ee1d09c5d2fa5" Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.272838 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi7e0a-account-delete-drmkw"] Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.281407 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novaapi7e0a-account-delete-drmkw"] Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.300580 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican8b4d-account-delete-dprdr"] Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.306397 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican8b4d-account-delete-dprdr"] Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.310750 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0a937-account-delete-czmhb"] Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.314761 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell0a937-account-delete-czmhb"] Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.322776 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance5148-account-delete-fbhmq"] Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.323917 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance5148-account-delete-fbhmq"] Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.620304 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dac2ddd-1d32-406d-bb47-cbcb0bd71b71" path="/var/lib/kubelet/pods/2dac2ddd-1d32-406d-bb47-cbcb0bd71b71/volumes" Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.621170 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36140bfd-540f-40b6-8521-a8a3d408dc9d" path="/var/lib/kubelet/pods/36140bfd-540f-40b6-8521-a8a3d408dc9d/volumes" Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 
09:13:46.621925 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="838bc013-33ba-4722-be1d-b88c9016c83a" path="/var/lib/kubelet/pods/838bc013-33ba-4722-be1d-b88c9016c83a/volumes" Nov 25 09:13:46 crc kubenswrapper[4932]: I1125 09:13:46.622626 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbc1ab9c-f494-4ce9-8758-d5c724e4413a" path="/var/lib/kubelet/pods/dbc1ab9c-f494-4ce9-8758-d5c724e4413a/volumes" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.505917 4932 scope.go:117] "RemoveContainer" containerID="271dbf2fd905420d2a8bdde0013d1a75c5748c6abb55b77de54d92992a396a5e" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.550367 4932 scope.go:117] "RemoveContainer" containerID="e8736a0e8dcdf7ceb436e66a1a1a5deb43accae216dccd68fd9dabbd866226f7" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.587841 4932 scope.go:117] "RemoveContainer" containerID="81ce3f95c5fe6793c19ca6b7421368b5e4668cff6c46e8b0b4ca229de7c578d4" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.618484 4932 scope.go:117] "RemoveContainer" containerID="e0b8bc681c7a05963d3036a4a876cb360601a52500fc470f6948f123c43bc3f7" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.661378 4932 scope.go:117] "RemoveContainer" containerID="880948a1911a933720439d209f58cb43b934a39763a09f23f4e4d68f382e39f0" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.713278 4932 scope.go:117] "RemoveContainer" containerID="d05bcefea7f8aa60e1f076a234efeb904bc9f1cf040c4db74790ff54cf400d60" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.735658 4932 scope.go:117] "RemoveContainer" containerID="6cc06260f5de9f1255bd24848bb0fbd27b365ac50fad917fa272ef82e787fc83" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.755674 4932 scope.go:117] "RemoveContainer" containerID="bf51179534c5cb56fcf8c3a15f1fa6f1e73e732d074224a9cc7f262e16a5f982" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.776675 4932 scope.go:117] "RemoveContainer" containerID="ddc3cfbc12b56e6bea03e56f92c9f19e040a8c2c6c86835eb76d2890eeb52bb3" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.810078 4932 scope.go:117] "RemoveContainer" containerID="6cea57533e20d403e6c8f694564639eb3b31680060a5f8aa034062f0c2af90c0" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.835161 4932 scope.go:117] "RemoveContainer" containerID="00a6d2bdb345402225b10a7c036b02b9a0fa2648af0319f26198095fb2c8e5e9" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.865770 4932 scope.go:117] "RemoveContainer" containerID="4bdba0a0070629dc89bd75eb2cd967b02a72c0ae20ab32bae70fe717dc0a8d8d" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.888361 4932 scope.go:117] "RemoveContainer" containerID="3f08ff77620cbdd809125bb2f2203d8655b5c5662687eebc59d5533391ed8c1a" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.921791 4932 scope.go:117] "RemoveContainer" containerID="bc3a346b5f0cec5224a51e30b5104fc851f6b8aab105dbefbcf8a761ba43f86d" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.945015 4932 scope.go:117] "RemoveContainer" containerID="0d6aad4f35e29b72e657fd363bd7fc014cdc67271ceea4aafd0c3a3f5e8823a6" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.966804 4932 scope.go:117] "RemoveContainer" containerID="b0df38b4f04c201155a87a3a640ea8f360a9303ed174d877947a925eca63845d" Nov 25 09:14:03 crc kubenswrapper[4932]: I1125 09:14:03.999447 4932 scope.go:117] "RemoveContainer" containerID="59addd419d69845cf7dbc0a57d1607ea8827c861a9211b90e147c4ea470215f1" Nov 25 09:14:04 crc kubenswrapper[4932]: I1125 
09:14:04.034280 4932 scope.go:117] "RemoveContainer" containerID="4d0ab2b44271d534e37d3f273c78fe12c0481dffe52307dead0cfca9ed609e63" Nov 25 09:14:04 crc kubenswrapper[4932]: I1125 09:14:04.051258 4932 scope.go:117] "RemoveContainer" containerID="a35354a8de08cea5ce51c600660523c41a46a48b0a5773eb2e39562647569bde" Nov 25 09:14:04 crc kubenswrapper[4932]: I1125 09:14:04.067828 4932 scope.go:117] "RemoveContainer" containerID="29ea220a319155118ee72222ef7879f0c6f85a5fbe3eeb194a6b2229582758e2" Nov 25 09:14:07 crc kubenswrapper[4932]: I1125 09:14:07.181038 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:14:07 crc kubenswrapper[4932]: I1125 09:14:07.181515 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:14:37 crc kubenswrapper[4932]: I1125 09:14:37.181372 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:14:37 crc kubenswrapper[4932]: I1125 09:14:37.182015 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:14:37 crc kubenswrapper[4932]: I1125 09:14:37.182080 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 09:14:37 crc kubenswrapper[4932]: I1125 09:14:37.183170 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:14:37 crc kubenswrapper[4932]: I1125 09:14:37.183398 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" gracePeriod=600 Nov 25 09:14:37 crc kubenswrapper[4932]: E1125 09:14:37.325136 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:14:37 crc kubenswrapper[4932]: 
I1125 09:14:37.842120 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" exitCode=0 Nov 25 09:14:37 crc kubenswrapper[4932]: I1125 09:14:37.842259 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"} Nov 25 09:14:37 crc kubenswrapper[4932]: I1125 09:14:37.842573 4932 scope.go:117] "RemoveContainer" containerID="647e12679adbf63ee5c63458089dff922023eeb7cd99a634cbd8c2a9db9a0cd7" Nov 25 09:14:37 crc kubenswrapper[4932]: I1125 09:14:37.843549 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:14:37 crc kubenswrapper[4932]: E1125 09:14:37.843986 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:14:52 crc kubenswrapper[4932]: I1125 09:14:52.606726 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:14:52 crc kubenswrapper[4932]: E1125 09:14:52.607891 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.158462 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8"] Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159371 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovs-vswitchd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159400 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovs-vswitchd" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159429 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-auditor" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159444 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-auditor" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159469 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="swift-recon-cron" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159483 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="swift-recon-cron" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159511 4932 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-server" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159559 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-server" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159584 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="ceilometer-notification-agent" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159603 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="ceilometer-notification-agent" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159633 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-updater" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159649 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-updater" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159708 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d586a3b8-c6b8-4c6e-aa6f-11797966d218" containerName="nova-cell1-conductor-conductor" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159725 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d586a3b8-c6b8-4c6e-aa6f-11797966d218" containerName="nova-cell1-conductor-conductor" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159753 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" containerName="ovn-northd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159767 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" containerName="ovn-northd" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159795 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="sg-core" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159810 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="sg-core" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159841 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc680bc2-b240-40b6-b77e-c0d264f283b3" containerName="memcached" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159855 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc680bc2-b240-40b6-b77e-c0d264f283b3" containerName="memcached" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159881 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7865402-5a21-44f9-9436-d5d1bab67a07" containerName="cinder-api-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159895 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7865402-5a21-44f9-9436-d5d1bab67a07" containerName="cinder-api-log" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159913 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-server" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159931 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-server" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 
09:15:00.159947 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="rsync" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159962 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="rsync" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.159984 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-server" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.159999 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-server" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160026 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-replicator" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160041 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-replicator" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160059 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" containerName="cinder-scheduler" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160074 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" containerName="cinder-scheduler" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160105 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1c39090-1743-40c3-95d5-71f5ca126c96" containerName="barbican-worker-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160121 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1c39090-1743-40c3-95d5-71f5ca126c96" containerName="barbican-worker-log" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160143 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-expirer" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160157 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-expirer" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160178 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" containerName="ovsdbserver-sb" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160223 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" containerName="ovsdbserver-sb" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160253 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovsdb-server" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160268 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovsdb-server" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160293 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" containerName="openstack-network-exporter" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160308 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" containerName="openstack-network-exporter" Nov 25 
09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160334 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dac2ddd-1d32-406d-bb47-cbcb0bd71b71" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160350 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dac2ddd-1d32-406d-bb47-cbcb0bd71b71" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160373 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1c39090-1743-40c3-95d5-71f5ca126c96" containerName="barbican-worker" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160390 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1c39090-1743-40c3-95d5-71f5ca126c96" containerName="barbican-worker" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160423 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" containerName="probe" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160438 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" containerName="probe" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160469 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="186ced68-a489-410c-afa6-d4d623c37fc1" containerName="nova-scheduler-scheduler" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160485 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="186ced68-a489-410c-afa6-d4d623c37fc1" containerName="nova-scheduler-scheduler" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160514 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" containerName="glance-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160529 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" containerName="glance-log" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160557 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31866cc1-ccc2-4ffc-8de9-4651a1aa41ad" containerName="kube-state-metrics" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160574 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="31866cc1-ccc2-4ffc-8de9-4651a1aa41ad" containerName="kube-state-metrics" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160598 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerName="barbican-api-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160614 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerName="barbican-api-log" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160643 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2023df73-6a92-4838-8d5e-31f533796950" containerName="galera" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160659 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="2023df73-6a92-4838-8d5e-31f533796950" containerName="galera" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160677 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8134265d-9da9-4607-8db8-98330608ba4c" containerName="galera" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160692 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8134265d-9da9-4607-8db8-98330608ba4c" containerName="galera" Nov 25 
09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160717 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="proxy-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160733 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="proxy-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160758 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5373bec8-828a-4e9b-b0fd-6a0ef84375de" containerName="ovsdbserver-nb" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160810 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5373bec8-828a-4e9b-b0fd-6a0ef84375de" containerName="ovsdbserver-nb" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160828 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-replicator" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160843 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-replicator" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160861 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerName="nova-metadata-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160876 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerName="nova-metadata-log" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160894 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerName="nova-metadata-metadata" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160909 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerName="nova-metadata-metadata" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160932 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31823923-9ce9-49e0-b4c1-42418d49918c" containerName="nova-api-api" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160947 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="31823923-9ce9-49e0-b4c1-42418d49918c" containerName="nova-api-api" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.160979 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-updater" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.160994 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-updater" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161015 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovsdb-server-init" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161031 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovsdb-server-init" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161055 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a83ee8ae-69d7-4ca5-ade1-9d2450880338" containerName="barbican-keystone-listener" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161070 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a83ee8ae-69d7-4ca5-ade1-9d2450880338" 
containerName="barbican-keystone-listener" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161096 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31823923-9ce9-49e0-b4c1-42418d49918c" containerName="nova-api-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161111 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="31823923-9ce9-49e0-b4c1-42418d49918c" containerName="nova-api-log" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161142 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36140bfd-540f-40b6-8521-a8a3d408dc9d" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161159 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="36140bfd-540f-40b6-8521-a8a3d408dc9d" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161217 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a83ee8ae-69d7-4ca5-ade1-9d2450880338" containerName="barbican-keystone-listener-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161235 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a83ee8ae-69d7-4ca5-ade1-9d2450880338" containerName="barbican-keystone-listener-log" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161263 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" containerName="proxy-server" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161278 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" containerName="proxy-server" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161305 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" containerName="openstack-network-exporter" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161320 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" containerName="openstack-network-exporter" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161346 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" containerName="registry-server" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161361 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" containerName="registry-server" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161384 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a181c094-1cf9-42bd-b038-cc8a6f437aa3" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161400 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a181c094-1cf9-42bd-b038-cc8a6f437aa3" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161423 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="838bc013-33ba-4722-be1d-b88c9016c83a" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161439 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="838bc013-33ba-4722-be1d-b88c9016c83a" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161463 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a1917d6-4455-4cf5-b932-a38584663b02" containerName="placement-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 
09:15:00.161480 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a1917d6-4455-4cf5-b932-a38584663b02" containerName="placement-log" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161505 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15f7fd9d-7a12-4f06-9f9e-d9e4d059039f" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161522 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="15f7fd9d-7a12-4f06-9f9e-d9e4d059039f" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161546 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce711acf-071a-4387-8c42-e2f3f8c25df9" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161563 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce711acf-071a-4387-8c42-e2f3f8c25df9" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161584 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbc1ab9c-f494-4ce9-8758-d5c724e4413a" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161601 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbc1ab9c-f494-4ce9-8758-d5c724e4413a" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161626 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="969d317e-0787-44a8-8e27-554b0e887444" containerName="setup-container" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161643 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="969d317e-0787-44a8-8e27-554b0e887444" containerName="setup-container" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161665 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" containerName="extract-utilities" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161681 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" containerName="extract-utilities" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161703 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="969d317e-0787-44a8-8e27-554b0e887444" containerName="rabbitmq" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161718 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="969d317e-0787-44a8-8e27-554b0e887444" containerName="rabbitmq" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161739 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-replicator" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161755 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-replicator" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161778 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerName="barbican-api" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161796 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerName="barbican-api" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161823 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57cfb59-e562-4fb2-bfad-b4cf5382c45a" 
containerName="nova-cell0-conductor-conductor" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161839 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57cfb59-e562-4fb2-bfad-b4cf5382c45a" containerName="nova-cell0-conductor-conductor" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161873 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9855d3c-818d-4804-add2-d6b0fce52613" containerName="init" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161889 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9855d3c-818d-4804-add2-d6b0fce52613" containerName="init" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161916 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="633c3722-e337-4b6a-98fe-451ac451dd06" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161933 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="633c3722-e337-4b6a-98fe-451ac451dd06" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.161962 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" containerName="glance-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.161979 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" containerName="glance-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162008 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9855d3c-818d-4804-add2-d6b0fce52613" containerName="dnsmasq-dns" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162025 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9855d3c-818d-4804-add2-d6b0fce52613" containerName="dnsmasq-dns" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162050 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-auditor" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162066 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-auditor" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162094 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f41b25a4-f48e-4938-9c23-0d89751af6ae" containerName="rabbitmq" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162110 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f41b25a4-f48e-4938-9c23-0d89751af6ae" containerName="rabbitmq" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162128 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a1917d6-4455-4cf5-b932-a38584663b02" containerName="placement-api" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162144 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a1917d6-4455-4cf5-b932-a38584663b02" containerName="placement-api" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162170 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" containerName="neutron-api" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162220 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" containerName="neutron-api" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162252 4932 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-reaper" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162302 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-reaper" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162325 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" containerName="neutron-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162344 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" containerName="neutron-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162371 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5101ae2-5106-48c7-9116-4c0e5ededb84" containerName="glance-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162387 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5101ae2-5106-48c7-9116-4c0e5ededb84" containerName="glance-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162418 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8134265d-9da9-4607-8db8-98330608ba4c" containerName="mysql-bootstrap" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162434 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8134265d-9da9-4607-8db8-98330608ba4c" containerName="mysql-bootstrap" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162460 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-auditor" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162476 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-auditor" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162507 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8153c48a-65e5-4525-b3ca-4dba83d94681" containerName="keystone-api" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162523 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8153c48a-65e5-4525-b3ca-4dba83d94681" containerName="keystone-api" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162544 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2023df73-6a92-4838-8d5e-31f533796950" containerName="mysql-bootstrap" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162560 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="2023df73-6a92-4838-8d5e-31f533796950" containerName="mysql-bootstrap" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162579 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b15edfd7-749d-45a4-9801-1eba98d77a5e" containerName="ovn-controller" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162595 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b15edfd7-749d-45a4-9801-1eba98d77a5e" containerName="ovn-controller" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162621 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5101ae2-5106-48c7-9116-4c0e5ededb84" containerName="glance-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162637 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5101ae2-5106-48c7-9116-4c0e5ededb84" containerName="glance-log" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162667 4932 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="ceilometer-central-agent" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162705 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="ceilometer-central-agent" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162738 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" containerName="extract-content" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162754 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" containerName="extract-content" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162780 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f41b25a4-f48e-4938-9c23-0d89751af6ae" containerName="setup-container" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162799 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f41b25a4-f48e-4938-9c23-0d89751af6ae" containerName="setup-container" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162817 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7865402-5a21-44f9-9436-d5d1bab67a07" containerName="cinder-api" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162833 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7865402-5a21-44f9-9436-d5d1bab67a07" containerName="cinder-api" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162855 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5373bec8-828a-4e9b-b0fd-6a0ef84375de" containerName="openstack-network-exporter" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162872 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5373bec8-828a-4e9b-b0fd-6a0ef84375de" containerName="openstack-network-exporter" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162898 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c16a4087-2597-4662-880f-80a7a2a78ef2" containerName="openstack-network-exporter" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162915 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c16a4087-2597-4662-880f-80a7a2a78ef2" containerName="openstack-network-exporter" Nov 25 09:15:00 crc kubenswrapper[4932]: E1125 09:15:00.162935 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" containerName="proxy-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.162952 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" containerName="proxy-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163473 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1c39090-1743-40c3-95d5-71f5ca126c96" containerName="barbican-worker-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163549 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-reaper" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163579 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-replicator" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163616 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" containerName="glance-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: 
I1125 09:15:00.163642 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-server" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163675 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="186ced68-a489-410c-afa6-d4d623c37fc1" containerName="nova-scheduler-scheduler" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163703 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="31823923-9ce9-49e0-b4c1-42418d49918c" containerName="nova-api-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163732 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-updater" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163756 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a1917d6-4455-4cf5-b932-a38584663b02" containerName="placement-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163786 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" containerName="ovn-northd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163809 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" containerName="probe" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163831 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7865402-5a21-44f9-9436-d5d1bab67a07" containerName="cinder-api" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163851 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="2023df73-6a92-4838-8d5e-31f533796950" containerName="galera" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163873 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="52ff2b1b-8756-4ec2-92b6-54c1d005d1cc" containerName="cinder-scheduler" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163896 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovsdb-server" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163928 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="5373bec8-828a-4e9b-b0fd-6a0ef84375de" containerName="openstack-network-exporter" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163951 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="31866cc1-ccc2-4ffc-8de9-4651a1aa41ad" containerName="kube-state-metrics" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.163978 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-server" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164001 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a1917d6-4455-4cf5-b932-a38584663b02" containerName="placement-api" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164023 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="rsync" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164043 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="ceilometer-notification-agent" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164064 4932 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2dac2ddd-1d32-406d-bb47-cbcb0bd71b71" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164080 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="36140bfd-540f-40b6-8521-a8a3d408dc9d" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164110 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-replicator" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164133 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="969d317e-0787-44a8-8e27-554b0e887444" containerName="rabbitmq" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164152 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-updater" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164182 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="31823923-9ce9-49e0-b4c1-42418d49918c" containerName="nova-api-api" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164246 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a83ee8ae-69d7-4ca5-ade1-9d2450880338" containerName="barbican-keystone-listener" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164278 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbc1ab9c-f494-4ce9-8758-d5c724e4413a" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164296 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9d818a0-17fd-44a2-8855-a6f847efe274" containerName="openstack-network-exporter" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164318 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerName="barbican-api-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164349 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" containerName="ovsdbserver-sb" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164367 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="633c3722-e337-4b6a-98fe-451ac451dd06" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164391 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f41b25a4-f48e-4938-9c23-0d89751af6ae" containerName="rabbitmq" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164416 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="account-auditor" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164436 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="proxy-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164457 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="swift-recon-cron" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164476 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1c39090-1743-40c3-95d5-71f5ca126c96" containerName="barbican-worker" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164499 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" 
containerName="container-server" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164528 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-auditor" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164551 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="257c86ab-2577-4d46-bdb3-1ec56da0d21e" containerName="ovs-vswitchd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164569 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" containerName="proxy-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164586 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="84b6148f-b9bf-41ef-a1ba-c282f94882ee" containerName="registry-server" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164616 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="5373bec8-828a-4e9b-b0fd-6a0ef84375de" containerName="ovsdbserver-nb" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164643 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d586a3b8-c6b8-4c6e-aa6f-11797966d218" containerName="nova-cell1-conductor-conductor" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164663 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerName="nova-metadata-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164687 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b15edfd7-749d-45a4-9801-1eba98d77a5e" containerName="ovn-controller" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164712 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1e12e22-8a2c-4093-b9c5-7cc68348e0ee" containerName="barbican-api" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164730 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5101ae2-5106-48c7-9116-4c0e5ededb84" containerName="glance-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164745 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="sg-core" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164776 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c16a4087-2597-4662-880f-80a7a2a78ef2" containerName="openstack-network-exporter" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164803 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" containerName="neutron-api" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164825 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a181c094-1cf9-42bd-b038-cc8a6f437aa3" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164843 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="8153c48a-65e5-4525-b3ca-4dba83d94681" containerName="keystone-api" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164874 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a83ee8ae-69d7-4ca5-ade1-9d2450880338" containerName="barbican-keystone-listener-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164898 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7865402-5a21-44f9-9436-d5d1bab67a07" containerName="cinder-api-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 
09:15:00.164923 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="90c30cef-5376-4f4a-8d59-9ab6daff902d" containerName="nova-metadata-metadata" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164951 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="15f7fd9d-7a12-4f06-9f9e-d9e4d059039f" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164968 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8f8a8a6-2b2d-4f22-9ea5-dd0b3a4a8c61" containerName="glance-log" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.164991 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="838bc013-33ba-4722-be1d-b88c9016c83a" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165011 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9855d3c-818d-4804-add2-d6b0fce52613" containerName="dnsmasq-dns" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165033 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5891fee-c3c6-4a52-a07a-f9e1eea3b7e5" containerName="neutron-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165058 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce711acf-071a-4387-8c42-e2f3f8c25df9" containerName="mariadb-account-delete" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165085 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc680bc2-b240-40b6-b77e-c0d264f283b3" containerName="memcached" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165103 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="8134265d-9da9-4607-8db8-98330608ba4c" containerName="galera" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165134 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4bc00f1-7938-42cf-9e1b-3bd8b4b6d7ce" containerName="openstack-network-exporter" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165158 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-expirer" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165185 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5101ae2-5106-48c7-9116-4c0e5ededb84" containerName="glance-httpd" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165245 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="90db5718-c185-4863-888a-6cb41ca5339d" containerName="ceilometer-central-agent" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165270 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e365f51-6fe5-47b3-b183-5cf5cae5c65e" containerName="proxy-server" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165293 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="container-replicator" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165321 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="81ccee4a-f414-4007-ae17-b440b55dea5f" containerName="object-auditor" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.165347 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b57cfb59-e562-4fb2-bfad-b4cf5382c45a" containerName="nova-cell0-conductor-conductor" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.166316 4932 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.173953 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.174283 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.175713 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8"] Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.280509 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aab3e506-147f-41f1-899e-013c5126dfea-config-volume\") pod \"collect-profiles-29401035-pm6g8\" (UID: \"aab3e506-147f-41f1-899e-013c5126dfea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.280549 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aab3e506-147f-41f1-899e-013c5126dfea-secret-volume\") pod \"collect-profiles-29401035-pm6g8\" (UID: \"aab3e506-147f-41f1-899e-013c5126dfea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.280573 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94qtw\" (UniqueName: \"kubernetes.io/projected/aab3e506-147f-41f1-899e-013c5126dfea-kube-api-access-94qtw\") pod \"collect-profiles-29401035-pm6g8\" (UID: \"aab3e506-147f-41f1-899e-013c5126dfea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.381426 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aab3e506-147f-41f1-899e-013c5126dfea-config-volume\") pod \"collect-profiles-29401035-pm6g8\" (UID: \"aab3e506-147f-41f1-899e-013c5126dfea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.381464 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aab3e506-147f-41f1-899e-013c5126dfea-secret-volume\") pod \"collect-profiles-29401035-pm6g8\" (UID: \"aab3e506-147f-41f1-899e-013c5126dfea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.381482 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94qtw\" (UniqueName: \"kubernetes.io/projected/aab3e506-147f-41f1-899e-013c5126dfea-kube-api-access-94qtw\") pod \"collect-profiles-29401035-pm6g8\" (UID: \"aab3e506-147f-41f1-899e-013c5126dfea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.382941 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aab3e506-147f-41f1-899e-013c5126dfea-config-volume\") 
pod \"collect-profiles-29401035-pm6g8\" (UID: \"aab3e506-147f-41f1-899e-013c5126dfea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.399857 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aab3e506-147f-41f1-899e-013c5126dfea-secret-volume\") pod \"collect-profiles-29401035-pm6g8\" (UID: \"aab3e506-147f-41f1-899e-013c5126dfea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.400434 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94qtw\" (UniqueName: \"kubernetes.io/projected/aab3e506-147f-41f1-899e-013c5126dfea-kube-api-access-94qtw\") pod \"collect-profiles-29401035-pm6g8\" (UID: \"aab3e506-147f-41f1-899e-013c5126dfea\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.529994 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:00 crc kubenswrapper[4932]: I1125 09:15:00.799520 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8"] Nov 25 09:15:01 crc kubenswrapper[4932]: I1125 09:15:01.130363 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" event={"ID":"aab3e506-147f-41f1-899e-013c5126dfea","Type":"ContainerStarted","Data":"9d4cc229698d0944ed0ff1940bf0e8cd47f89a4a582470666c5c6b89f1fc3ffa"} Nov 25 09:15:01 crc kubenswrapper[4932]: I1125 09:15:01.130741 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" event={"ID":"aab3e506-147f-41f1-899e-013c5126dfea","Type":"ContainerStarted","Data":"abb74ba317f81d37b2eda03a04b28ad74d82926c48210119a483d4f3d0cdfe5c"} Nov 25 09:15:01 crc kubenswrapper[4932]: I1125 09:15:01.161550 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" podStartSLOduration=1.1615217 podStartE2EDuration="1.1615217s" podCreationTimestamp="2025-11-25 09:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 09:15:01.157641815 +0000 UTC m=+1561.283671388" watchObservedRunningTime="2025-11-25 09:15:01.1615217 +0000 UTC m=+1561.287551293" Nov 25 09:15:02 crc kubenswrapper[4932]: I1125 09:15:02.143959 4932 generic.go:334] "Generic (PLEG): container finished" podID="aab3e506-147f-41f1-899e-013c5126dfea" containerID="9d4cc229698d0944ed0ff1940bf0e8cd47f89a4a582470666c5c6b89f1fc3ffa" exitCode=0 Nov 25 09:15:02 crc kubenswrapper[4932]: I1125 09:15:02.144073 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" event={"ID":"aab3e506-147f-41f1-899e-013c5126dfea","Type":"ContainerDied","Data":"9d4cc229698d0944ed0ff1940bf0e8cd47f89a4a582470666c5c6b89f1fc3ffa"} Nov 25 09:15:03 crc kubenswrapper[4932]: I1125 09:15:03.516967 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:03 crc kubenswrapper[4932]: I1125 09:15:03.634108 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94qtw\" (UniqueName: \"kubernetes.io/projected/aab3e506-147f-41f1-899e-013c5126dfea-kube-api-access-94qtw\") pod \"aab3e506-147f-41f1-899e-013c5126dfea\" (UID: \"aab3e506-147f-41f1-899e-013c5126dfea\") " Nov 25 09:15:03 crc kubenswrapper[4932]: I1125 09:15:03.634638 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aab3e506-147f-41f1-899e-013c5126dfea-secret-volume\") pod \"aab3e506-147f-41f1-899e-013c5126dfea\" (UID: \"aab3e506-147f-41f1-899e-013c5126dfea\") " Nov 25 09:15:03 crc kubenswrapper[4932]: I1125 09:15:03.634762 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aab3e506-147f-41f1-899e-013c5126dfea-config-volume\") pod \"aab3e506-147f-41f1-899e-013c5126dfea\" (UID: \"aab3e506-147f-41f1-899e-013c5126dfea\") " Nov 25 09:15:03 crc kubenswrapper[4932]: I1125 09:15:03.635406 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aab3e506-147f-41f1-899e-013c5126dfea-config-volume" (OuterVolumeSpecName: "config-volume") pod "aab3e506-147f-41f1-899e-013c5126dfea" (UID: "aab3e506-147f-41f1-899e-013c5126dfea"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:15:03 crc kubenswrapper[4932]: I1125 09:15:03.642599 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aab3e506-147f-41f1-899e-013c5126dfea-kube-api-access-94qtw" (OuterVolumeSpecName: "kube-api-access-94qtw") pod "aab3e506-147f-41f1-899e-013c5126dfea" (UID: "aab3e506-147f-41f1-899e-013c5126dfea"). InnerVolumeSpecName "kube-api-access-94qtw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:15:03 crc kubenswrapper[4932]: I1125 09:15:03.643416 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aab3e506-147f-41f1-899e-013c5126dfea-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "aab3e506-147f-41f1-899e-013c5126dfea" (UID: "aab3e506-147f-41f1-899e-013c5126dfea"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:15:03 crc kubenswrapper[4932]: I1125 09:15:03.738580 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94qtw\" (UniqueName: \"kubernetes.io/projected/aab3e506-147f-41f1-899e-013c5126dfea-kube-api-access-94qtw\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:03 crc kubenswrapper[4932]: I1125 09:15:03.738625 4932 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/aab3e506-147f-41f1-899e-013c5126dfea-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:03 crc kubenswrapper[4932]: I1125 09:15:03.738644 4932 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/aab3e506-147f-41f1-899e-013c5126dfea-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:15:04 crc kubenswrapper[4932]: I1125 09:15:04.176944 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" event={"ID":"aab3e506-147f-41f1-899e-013c5126dfea","Type":"ContainerDied","Data":"abb74ba317f81d37b2eda03a04b28ad74d82926c48210119a483d4f3d0cdfe5c"} Nov 25 09:15:04 crc kubenswrapper[4932]: I1125 09:15:04.177032 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="abb74ba317f81d37b2eda03a04b28ad74d82926c48210119a483d4f3d0cdfe5c" Nov 25 09:15:04 crc kubenswrapper[4932]: I1125 09:15:04.177100 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8" Nov 25 09:15:04 crc kubenswrapper[4932]: I1125 09:15:04.941734 4932 scope.go:117] "RemoveContainer" containerID="35b97667ea2f3c8f4d0ed84ea1858ea883985abf6696778a33a9ecd4a8814409" Nov 25 09:15:04 crc kubenswrapper[4932]: I1125 09:15:04.991800 4932 scope.go:117] "RemoveContainer" containerID="0e68c9c73f7ebc342beac473b5b54d359282071ad585cbf463817d807a73bca8" Nov 25 09:15:05 crc kubenswrapper[4932]: I1125 09:15:05.049382 4932 scope.go:117] "RemoveContainer" containerID="44c20a6528a98cf85e7eddf8dc1dcf7d3891a50d73c5bee0ebfb78f28694c629" Nov 25 09:15:05 crc kubenswrapper[4932]: I1125 09:15:05.074715 4932 scope.go:117] "RemoveContainer" containerID="04f68200aa484dc2dc939e490dcf4ff88ff89773a7c654ea9b8bce2bac1b8aaf" Nov 25 09:15:05 crc kubenswrapper[4932]: I1125 09:15:05.129427 4932 scope.go:117] "RemoveContainer" containerID="9819269c37912ac65e199eb5854e29f21125b062fca1aae014ea6ef312421df9" Nov 25 09:15:05 crc kubenswrapper[4932]: I1125 09:15:05.156896 4932 scope.go:117] "RemoveContainer" containerID="b2c7e77789303154985b5d572e2e645dc59426801021e02f4bfe4ed70e08243e" Nov 25 09:15:05 crc kubenswrapper[4932]: I1125 09:15:05.178705 4932 scope.go:117] "RemoveContainer" containerID="e5ddbce93438e379c217b355c8b70d4d14eedbb1679bbeaf3ff04b9b953a64d0" Nov 25 09:15:07 crc kubenswrapper[4932]: I1125 09:15:07.606547 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:15:07 crc kubenswrapper[4932]: E1125 09:15:07.607067 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" 
podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:15:18 crc kubenswrapper[4932]: I1125 09:15:18.606922 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:15:18 crc kubenswrapper[4932]: E1125 09:15:18.608125 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:15:31 crc kubenswrapper[4932]: I1125 09:15:31.606633 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:15:31 crc kubenswrapper[4932]: E1125 09:15:31.607411 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:15:44 crc kubenswrapper[4932]: I1125 09:15:44.606553 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:15:44 crc kubenswrapper[4932]: E1125 09:15:44.607447 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:15:58 crc kubenswrapper[4932]: I1125 09:15:58.606349 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:15:58 crc kubenswrapper[4932]: E1125 09:15:58.607647 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:16:05 crc kubenswrapper[4932]: I1125 09:16:05.344853 4932 scope.go:117] "RemoveContainer" containerID="abe66e4f341b24534642787b92c4263f4ebf66e15aa3b9d673ff051b62fba4b5" Nov 25 09:16:05 crc kubenswrapper[4932]: I1125 09:16:05.385466 4932 scope.go:117] "RemoveContainer" containerID="bcb0b33d20667e08d805c88572654c89aed61e0f969c78fc5ef9ec57be99532f" Nov 25 09:16:05 crc kubenswrapper[4932]: I1125 09:16:05.973540 4932 scope.go:117] "RemoveContainer" containerID="adf6076cc75094913a8f42ea8fcd7116b3db72502f1ce0681414098453d4e7ce" Nov 25 09:16:06 crc kubenswrapper[4932]: I1125 09:16:06.044966 4932 scope.go:117] "RemoveContainer" containerID="75148f61ebc41098dc7bed57c08e55401d84cfe49c16eea03690a213f89fa9e6" Nov 25 09:16:06 crc kubenswrapper[4932]: I1125 09:16:06.076298 4932 scope.go:117] "RemoveContainer" 
containerID="3cc44a4bb5bf8607df01e273c8abc8040ee6601acb99979bb6dd8efe2be567aa" Nov 25 09:16:06 crc kubenswrapper[4932]: I1125 09:16:06.095391 4932 scope.go:117] "RemoveContainer" containerID="f728aa03da6869a351a6020593c1a1aa2ec915e5a09d978c9e647881d5686d77" Nov 25 09:16:06 crc kubenswrapper[4932]: I1125 09:16:06.115733 4932 scope.go:117] "RemoveContainer" containerID="bb6122d938bb9d23fba3db816a5bc8cfafc993ae9ba9ebacdda64a4c57056966" Nov 25 09:16:06 crc kubenswrapper[4932]: I1125 09:16:06.139236 4932 scope.go:117] "RemoveContainer" containerID="f66e9f2ad343c3b0f6e212719d6e385729f097478aabe9d1af995fb102ee097e" Nov 25 09:16:06 crc kubenswrapper[4932]: I1125 09:16:06.160550 4932 scope.go:117] "RemoveContainer" containerID="a88145623badf6b75935e3c66fd27e243c456b8c75fa4980f833fcbd15313f78" Nov 25 09:16:06 crc kubenswrapper[4932]: I1125 09:16:06.180330 4932 scope.go:117] "RemoveContainer" containerID="53bcbc203394b3c852ba1c6182bc8eaf5e1970de1e3b7f900c6947cac59286d4" Nov 25 09:16:06 crc kubenswrapper[4932]: I1125 09:16:06.194816 4932 scope.go:117] "RemoveContainer" containerID="8861de718b935ea10a4da1b0bd44557dea4ff94e37f66b0d78a10585c74a92f6" Nov 25 09:16:06 crc kubenswrapper[4932]: I1125 09:16:06.213758 4932 scope.go:117] "RemoveContainer" containerID="044d1296bb65dfef08ae69e4e66aaaf33fef1c2827f6ab1bc28df180a80213c7" Nov 25 09:16:06 crc kubenswrapper[4932]: I1125 09:16:06.241762 4932 scope.go:117] "RemoveContainer" containerID="319658ac79c4fb4fcd46ed313645d6769272569f11fd5e0e78f4b23b5fcf4935" Nov 25 09:16:13 crc kubenswrapper[4932]: I1125 09:16:13.606627 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:16:13 crc kubenswrapper[4932]: E1125 09:16:13.607908 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:16:28 crc kubenswrapper[4932]: I1125 09:16:28.613031 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:16:28 crc kubenswrapper[4932]: E1125 09:16:28.614423 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:16:40 crc kubenswrapper[4932]: I1125 09:16:40.610706 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:16:40 crc kubenswrapper[4932]: E1125 09:16:40.611616 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:16:55 crc 
kubenswrapper[4932]: I1125 09:16:55.606413 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:16:55 crc kubenswrapper[4932]: E1125 09:16:55.607417 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:17:01 crc kubenswrapper[4932]: I1125 09:17:01.873552 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fw92p"] Nov 25 09:17:01 crc kubenswrapper[4932]: E1125 09:17:01.874826 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aab3e506-147f-41f1-899e-013c5126dfea" containerName="collect-profiles" Nov 25 09:17:01 crc kubenswrapper[4932]: I1125 09:17:01.874849 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="aab3e506-147f-41f1-899e-013c5126dfea" containerName="collect-profiles" Nov 25 09:17:01 crc kubenswrapper[4932]: I1125 09:17:01.875230 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="aab3e506-147f-41f1-899e-013c5126dfea" containerName="collect-profiles" Nov 25 09:17:01 crc kubenswrapper[4932]: I1125 09:17:01.877029 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:01 crc kubenswrapper[4932]: I1125 09:17:01.891039 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fw92p"] Nov 25 09:17:02 crc kubenswrapper[4932]: I1125 09:17:02.053556 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/887a5714-e658-4d6e-af56-151e97154570-catalog-content\") pod \"certified-operators-fw92p\" (UID: \"887a5714-e658-4d6e-af56-151e97154570\") " pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:02 crc kubenswrapper[4932]: I1125 09:17:02.053869 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9z8g\" (UniqueName: \"kubernetes.io/projected/887a5714-e658-4d6e-af56-151e97154570-kube-api-access-l9z8g\") pod \"certified-operators-fw92p\" (UID: \"887a5714-e658-4d6e-af56-151e97154570\") " pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:02 crc kubenswrapper[4932]: I1125 09:17:02.053967 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/887a5714-e658-4d6e-af56-151e97154570-utilities\") pod \"certified-operators-fw92p\" (UID: \"887a5714-e658-4d6e-af56-151e97154570\") " pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:02 crc kubenswrapper[4932]: I1125 09:17:02.155985 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/887a5714-e658-4d6e-af56-151e97154570-catalog-content\") pod \"certified-operators-fw92p\" (UID: \"887a5714-e658-4d6e-af56-151e97154570\") " pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:02 crc kubenswrapper[4932]: I1125 09:17:02.156518 4932 reconciler_common.go:218] 
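Every record in this file follows the same journald-plus-klog shape: a syslog timestamp and host, the kubenswrapper unit with its PID, a klog severity letter (I/W/E) and timestamp, the source file and line, then the structured message. A minimal parsing sketch follows; the regular expression is an assumption fitted to the lines visible here, not an official format definition.

// parse.go - sketch of splitting the journald/klog records above into
// fields. The regex is fitted to this log's lines; it is an assumption,
// not a guaranteed format.
package main

import (
	"fmt"
	"regexp"
)

var record = regexp.MustCompile(
	`^(\w+ \d+ [\d:]+) (\S+) kubenswrapper\[(\d+)\]: ([IWE])(\d{4} [\d:.]+)\s+\d+ ([\w.]+:\d+)\] (.*)$`)

func main() {
	line := `Nov 25 09:15:07 crc kubenswrapper[4932]: E1125 09:15:07.607067 4932 pod_workers.go:1301] "Error syncing pod, skipping"`
	m := record.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	// m[5] is the klog timestamp; m[4] is the severity letter.
	fmt.Printf("time=%s host=%s pid=%s level=%s src=%s msg=%s\n",
		m[1], m[2], m[3], m[4], m[6], m[7])
}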
"operationExecutor.MountVolume started for volume \"kube-api-access-l9z8g\" (UniqueName: \"kubernetes.io/projected/887a5714-e658-4d6e-af56-151e97154570-kube-api-access-l9z8g\") pod \"certified-operators-fw92p\" (UID: \"887a5714-e658-4d6e-af56-151e97154570\") " pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:02 crc kubenswrapper[4932]: I1125 09:17:02.156824 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/887a5714-e658-4d6e-af56-151e97154570-utilities\") pod \"certified-operators-fw92p\" (UID: \"887a5714-e658-4d6e-af56-151e97154570\") " pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:02 crc kubenswrapper[4932]: I1125 09:17:02.156978 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/887a5714-e658-4d6e-af56-151e97154570-catalog-content\") pod \"certified-operators-fw92p\" (UID: \"887a5714-e658-4d6e-af56-151e97154570\") " pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:02 crc kubenswrapper[4932]: I1125 09:17:02.157394 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/887a5714-e658-4d6e-af56-151e97154570-utilities\") pod \"certified-operators-fw92p\" (UID: \"887a5714-e658-4d6e-af56-151e97154570\") " pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:02 crc kubenswrapper[4932]: I1125 09:17:02.181012 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9z8g\" (UniqueName: \"kubernetes.io/projected/887a5714-e658-4d6e-af56-151e97154570-kube-api-access-l9z8g\") pod \"certified-operators-fw92p\" (UID: \"887a5714-e658-4d6e-af56-151e97154570\") " pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:02 crc kubenswrapper[4932]: I1125 09:17:02.213666 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:02 crc kubenswrapper[4932]: I1125 09:17:02.710989 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fw92p"] Nov 25 09:17:02 crc kubenswrapper[4932]: W1125 09:17:02.717296 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod887a5714_e658_4d6e_af56_151e97154570.slice/crio-d7b9cb7df59e006921be57f4fa00a202a0916dc237bd5aa5e01a45f0cc1832e8 WatchSource:0}: Error finding container d7b9cb7df59e006921be57f4fa00a202a0916dc237bd5aa5e01a45f0cc1832e8: Status 404 returned error can't find the container with id d7b9cb7df59e006921be57f4fa00a202a0916dc237bd5aa5e01a45f0cc1832e8 Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.047047 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jk29v"] Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.049605 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.064437 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jk29v"] Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.171654 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfznm\" (UniqueName: \"kubernetes.io/projected/d906346e-51e1-4db9-ae2e-f252dc08e93a-kube-api-access-tfznm\") pod \"community-operators-jk29v\" (UID: \"d906346e-51e1-4db9-ae2e-f252dc08e93a\") " pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.171714 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d906346e-51e1-4db9-ae2e-f252dc08e93a-utilities\") pod \"community-operators-jk29v\" (UID: \"d906346e-51e1-4db9-ae2e-f252dc08e93a\") " pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.171753 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d906346e-51e1-4db9-ae2e-f252dc08e93a-catalog-content\") pod \"community-operators-jk29v\" (UID: \"d906346e-51e1-4db9-ae2e-f252dc08e93a\") " pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.272956 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfznm\" (UniqueName: \"kubernetes.io/projected/d906346e-51e1-4db9-ae2e-f252dc08e93a-kube-api-access-tfznm\") pod \"community-operators-jk29v\" (UID: \"d906346e-51e1-4db9-ae2e-f252dc08e93a\") " pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.273251 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d906346e-51e1-4db9-ae2e-f252dc08e93a-utilities\") pod \"community-operators-jk29v\" (UID: \"d906346e-51e1-4db9-ae2e-f252dc08e93a\") " pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.273366 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d906346e-51e1-4db9-ae2e-f252dc08e93a-catalog-content\") pod \"community-operators-jk29v\" (UID: \"d906346e-51e1-4db9-ae2e-f252dc08e93a\") " pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.273685 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d906346e-51e1-4db9-ae2e-f252dc08e93a-utilities\") pod \"community-operators-jk29v\" (UID: \"d906346e-51e1-4db9-ae2e-f252dc08e93a\") " pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.273748 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d906346e-51e1-4db9-ae2e-f252dc08e93a-catalog-content\") pod \"community-operators-jk29v\" (UID: \"d906346e-51e1-4db9-ae2e-f252dc08e93a\") " pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.298939 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tfznm\" (UniqueName: \"kubernetes.io/projected/d906346e-51e1-4db9-ae2e-f252dc08e93a-kube-api-access-tfznm\") pod \"community-operators-jk29v\" (UID: \"d906346e-51e1-4db9-ae2e-f252dc08e93a\") " pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.369158 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.538824 4932 generic.go:334] "Generic (PLEG): container finished" podID="887a5714-e658-4d6e-af56-151e97154570" containerID="ff6e46ed675d27273dde91a5efc502d4180d34d5bf8942d49d2075e73a9afabe" exitCode=0 Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.538980 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fw92p" event={"ID":"887a5714-e658-4d6e-af56-151e97154570","Type":"ContainerDied","Data":"ff6e46ed675d27273dde91a5efc502d4180d34d5bf8942d49d2075e73a9afabe"} Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.539131 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fw92p" event={"ID":"887a5714-e658-4d6e-af56-151e97154570","Type":"ContainerStarted","Data":"d7b9cb7df59e006921be57f4fa00a202a0916dc237bd5aa5e01a45f0cc1832e8"} Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.543462 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:17:03 crc kubenswrapper[4932]: I1125 09:17:03.912663 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jk29v"] Nov 25 09:17:04 crc kubenswrapper[4932]: I1125 09:17:04.548245 4932 generic.go:334] "Generic (PLEG): container finished" podID="d906346e-51e1-4db9-ae2e-f252dc08e93a" containerID="cc0fd8c4cfed7a76fd2b18a0f168dd9c6dd5d3dbf65c49caa23a0c52514298f2" exitCode=0 Nov 25 09:17:04 crc kubenswrapper[4932]: I1125 09:17:04.548338 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jk29v" event={"ID":"d906346e-51e1-4db9-ae2e-f252dc08e93a","Type":"ContainerDied","Data":"cc0fd8c4cfed7a76fd2b18a0f168dd9c6dd5d3dbf65c49caa23a0c52514298f2"} Nov 25 09:17:04 crc kubenswrapper[4932]: I1125 09:17:04.548374 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jk29v" event={"ID":"d906346e-51e1-4db9-ae2e-f252dc08e93a","Type":"ContainerStarted","Data":"e91386ebba5e31774d9451f20ebeacb1721e7fc7a250d0d438f16d965edd3ade"} Nov 25 09:17:04 crc kubenswrapper[4932]: I1125 09:17:04.553065 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fw92p" event={"ID":"887a5714-e658-4d6e-af56-151e97154570","Type":"ContainerStarted","Data":"932c0eca1dc837e53c44a8ded75975f5f78a266b06527001d028da3c58d7fa46"} Nov 25 09:17:06 crc kubenswrapper[4932]: I1125 09:17:06.395964 4932 scope.go:117] "RemoveContainer" containerID="72f71cf73b9865b04d4d3de5c8547c8ca66dceb1900d89f1ff42c5d833013afd" Nov 25 09:17:06 crc kubenswrapper[4932]: I1125 09:17:06.422286 4932 scope.go:117] "RemoveContainer" containerID="c8ae9ee3aa8405ff65c9452ed08700eda42757ab1407937bb5f3003fe4cf7a9e" Nov 25 09:17:06 crc kubenswrapper[4932]: I1125 09:17:06.448369 4932 scope.go:117] "RemoveContainer" containerID="84c8cfeb381f864d67634b78621a1b7460c7087ecdf9baf7bdd83200605e31e2" Nov 25 09:17:06 crc kubenswrapper[4932]: I1125 
09:17:06.466671 4932 scope.go:117] "RemoveContainer" containerID="4864acfa3a39774595813e1aa545d79fe92ca72c92dee0bda77fccdc3c6b3214" Nov 25 09:17:06 crc kubenswrapper[4932]: I1125 09:17:06.507052 4932 scope.go:117] "RemoveContainer" containerID="b8682f71f2ee6925b54df3f64b25f4f743542faa8879099318a3b2e0226e6888" Nov 25 09:17:06 crc kubenswrapper[4932]: I1125 09:17:06.526804 4932 scope.go:117] "RemoveContainer" containerID="3d4d2ece1e5eef9d1d0e16758fced0df7cde0583ea5a26d7bbd9fa814e5ca952" Nov 25 09:17:06 crc kubenswrapper[4932]: I1125 09:17:06.555715 4932 scope.go:117] "RemoveContainer" containerID="80df99d51a793387f4befd153965af902fa51eff5beea4589846bd522aef8f83" Nov 25 09:17:06 crc kubenswrapper[4932]: I1125 09:17:06.572399 4932 generic.go:334] "Generic (PLEG): container finished" podID="887a5714-e658-4d6e-af56-151e97154570" containerID="932c0eca1dc837e53c44a8ded75975f5f78a266b06527001d028da3c58d7fa46" exitCode=0 Nov 25 09:17:06 crc kubenswrapper[4932]: I1125 09:17:06.572521 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fw92p" event={"ID":"887a5714-e658-4d6e-af56-151e97154570","Type":"ContainerDied","Data":"932c0eca1dc837e53c44a8ded75975f5f78a266b06527001d028da3c58d7fa46"} Nov 25 09:17:06 crc kubenswrapper[4932]: I1125 09:17:06.587425 4932 generic.go:334] "Generic (PLEG): container finished" podID="d906346e-51e1-4db9-ae2e-f252dc08e93a" containerID="d61db3786beae778eba89a42860f503e7d55d4caddeb68ffcd237dfacecbb5ec" exitCode=0 Nov 25 09:17:06 crc kubenswrapper[4932]: I1125 09:17:06.587454 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jk29v" event={"ID":"d906346e-51e1-4db9-ae2e-f252dc08e93a","Type":"ContainerDied","Data":"d61db3786beae778eba89a42860f503e7d55d4caddeb68ffcd237dfacecbb5ec"} Nov 25 09:17:07 crc kubenswrapper[4932]: I1125 09:17:07.597824 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fw92p" event={"ID":"887a5714-e658-4d6e-af56-151e97154570","Type":"ContainerStarted","Data":"fe658c022384004d6365621a406d8b5245d9a97c397a83fbe60d8485417582de"} Nov 25 09:17:07 crc kubenswrapper[4932]: I1125 09:17:07.601457 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jk29v" event={"ID":"d906346e-51e1-4db9-ae2e-f252dc08e93a","Type":"ContainerStarted","Data":"7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32"} Nov 25 09:17:07 crc kubenswrapper[4932]: I1125 09:17:07.627836 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fw92p" podStartSLOduration=3.15968189 podStartE2EDuration="6.627805771s" podCreationTimestamp="2025-11-25 09:17:01 +0000 UTC" firstStartedPulling="2025-11-25 09:17:03.543238148 +0000 UTC m=+1683.669267711" lastFinishedPulling="2025-11-25 09:17:07.011362029 +0000 UTC m=+1687.137391592" observedRunningTime="2025-11-25 09:17:07.619327658 +0000 UTC m=+1687.745357261" watchObservedRunningTime="2025-11-25 09:17:07.627805771 +0000 UTC m=+1687.753835364" Nov 25 09:17:07 crc kubenswrapper[4932]: I1125 09:17:07.650022 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jk29v" podStartSLOduration=2.1438389 podStartE2EDuration="4.649996926s" podCreationTimestamp="2025-11-25 09:17:03 +0000 UTC" firstStartedPulling="2025-11-25 09:17:04.549688454 +0000 UTC m=+1684.675718037" lastFinishedPulling="2025-11-25 09:17:07.05584646 +0000 UTC 
m=+1687.181876063" observedRunningTime="2025-11-25 09:17:07.643361047 +0000 UTC m=+1687.769390650" watchObservedRunningTime="2025-11-25 09:17:07.649996926 +0000 UTC m=+1687.776026519" Nov 25 09:17:09 crc kubenswrapper[4932]: I1125 09:17:09.606407 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:17:09 crc kubenswrapper[4932]: E1125 09:17:09.607036 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:17:12 crc kubenswrapper[4932]: I1125 09:17:12.213858 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:12 crc kubenswrapper[4932]: I1125 09:17:12.213929 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:12 crc kubenswrapper[4932]: I1125 09:17:12.272058 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:12 crc kubenswrapper[4932]: I1125 09:17:12.736461 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:13 crc kubenswrapper[4932]: I1125 09:17:13.369987 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:13 crc kubenswrapper[4932]: I1125 09:17:13.370895 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:13 crc kubenswrapper[4932]: I1125 09:17:13.451630 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:13 crc kubenswrapper[4932]: I1125 09:17:13.763428 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:13 crc kubenswrapper[4932]: I1125 09:17:13.847143 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fw92p"] Nov 25 09:17:14 crc kubenswrapper[4932]: I1125 09:17:14.450043 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jk29v"] Nov 25 09:17:14 crc kubenswrapper[4932]: I1125 09:17:14.669239 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fw92p" podUID="887a5714-e658-4d6e-af56-151e97154570" containerName="registry-server" containerID="cri-o://fe658c022384004d6365621a406d8b5245d9a97c397a83fbe60d8485417582de" gracePeriod=2 Nov 25 09:17:15 crc kubenswrapper[4932]: I1125 09:17:15.680558 4932 generic.go:334] "Generic (PLEG): container finished" podID="887a5714-e658-4d6e-af56-151e97154570" containerID="fe658c022384004d6365621a406d8b5245d9a97c397a83fbe60d8485417582de" exitCode=0 Nov 25 09:17:15 crc kubenswrapper[4932]: I1125 09:17:15.680597 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fw92p" 
event={"ID":"887a5714-e658-4d6e-af56-151e97154570","Type":"ContainerDied","Data":"fe658c022384004d6365621a406d8b5245d9a97c397a83fbe60d8485417582de"} Nov 25 09:17:15 crc kubenswrapper[4932]: I1125 09:17:15.681081 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jk29v" podUID="d906346e-51e1-4db9-ae2e-f252dc08e93a" containerName="registry-server" containerID="cri-o://7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32" gracePeriod=2 Nov 25 09:17:15 crc kubenswrapper[4932]: I1125 09:17:15.867534 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.064027 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9z8g\" (UniqueName: \"kubernetes.io/projected/887a5714-e658-4d6e-af56-151e97154570-kube-api-access-l9z8g\") pod \"887a5714-e658-4d6e-af56-151e97154570\" (UID: \"887a5714-e658-4d6e-af56-151e97154570\") " Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.064448 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/887a5714-e658-4d6e-af56-151e97154570-catalog-content\") pod \"887a5714-e658-4d6e-af56-151e97154570\" (UID: \"887a5714-e658-4d6e-af56-151e97154570\") " Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.064650 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/887a5714-e658-4d6e-af56-151e97154570-utilities\") pod \"887a5714-e658-4d6e-af56-151e97154570\" (UID: \"887a5714-e658-4d6e-af56-151e97154570\") " Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.065422 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/887a5714-e658-4d6e-af56-151e97154570-utilities" (OuterVolumeSpecName: "utilities") pod "887a5714-e658-4d6e-af56-151e97154570" (UID: "887a5714-e658-4d6e-af56-151e97154570"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.079483 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/887a5714-e658-4d6e-af56-151e97154570-kube-api-access-l9z8g" (OuterVolumeSpecName: "kube-api-access-l9z8g") pod "887a5714-e658-4d6e-af56-151e97154570" (UID: "887a5714-e658-4d6e-af56-151e97154570"). InnerVolumeSpecName "kube-api-access-l9z8g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.115742 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/887a5714-e658-4d6e-af56-151e97154570-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "887a5714-e658-4d6e-af56-151e97154570" (UID: "887a5714-e658-4d6e-af56-151e97154570"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.165860 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9z8g\" (UniqueName: \"kubernetes.io/projected/887a5714-e658-4d6e-af56-151e97154570-kube-api-access-l9z8g\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.165886 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/887a5714-e658-4d6e-af56-151e97154570-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.165895 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/887a5714-e658-4d6e-af56-151e97154570-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.203584 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.368866 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d906346e-51e1-4db9-ae2e-f252dc08e93a-catalog-content\") pod \"d906346e-51e1-4db9-ae2e-f252dc08e93a\" (UID: \"d906346e-51e1-4db9-ae2e-f252dc08e93a\") " Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.368949 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d906346e-51e1-4db9-ae2e-f252dc08e93a-utilities\") pod \"d906346e-51e1-4db9-ae2e-f252dc08e93a\" (UID: \"d906346e-51e1-4db9-ae2e-f252dc08e93a\") " Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.369128 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfznm\" (UniqueName: \"kubernetes.io/projected/d906346e-51e1-4db9-ae2e-f252dc08e93a-kube-api-access-tfznm\") pod \"d906346e-51e1-4db9-ae2e-f252dc08e93a\" (UID: \"d906346e-51e1-4db9-ae2e-f252dc08e93a\") " Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.370593 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d906346e-51e1-4db9-ae2e-f252dc08e93a-utilities" (OuterVolumeSpecName: "utilities") pod "d906346e-51e1-4db9-ae2e-f252dc08e93a" (UID: "d906346e-51e1-4db9-ae2e-f252dc08e93a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.378541 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d906346e-51e1-4db9-ae2e-f252dc08e93a-kube-api-access-tfznm" (OuterVolumeSpecName: "kube-api-access-tfznm") pod "d906346e-51e1-4db9-ae2e-f252dc08e93a" (UID: "d906346e-51e1-4db9-ae2e-f252dc08e93a"). InnerVolumeSpecName "kube-api-access-tfznm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.421469 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d906346e-51e1-4db9-ae2e-f252dc08e93a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d906346e-51e1-4db9-ae2e-f252dc08e93a" (UID: "d906346e-51e1-4db9-ae2e-f252dc08e93a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.470784 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d906346e-51e1-4db9-ae2e-f252dc08e93a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.470818 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d906346e-51e1-4db9-ae2e-f252dc08e93a-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.470831 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfznm\" (UniqueName: \"kubernetes.io/projected/d906346e-51e1-4db9-ae2e-f252dc08e93a-kube-api-access-tfznm\") on node \"crc\" DevicePath \"\"" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.692676 4932 generic.go:334] "Generic (PLEG): container finished" podID="d906346e-51e1-4db9-ae2e-f252dc08e93a" containerID="7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32" exitCode=0 Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.692743 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jk29v" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.692776 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jk29v" event={"ID":"d906346e-51e1-4db9-ae2e-f252dc08e93a","Type":"ContainerDied","Data":"7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32"} Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.692811 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jk29v" event={"ID":"d906346e-51e1-4db9-ae2e-f252dc08e93a","Type":"ContainerDied","Data":"e91386ebba5e31774d9451f20ebeacb1721e7fc7a250d0d438f16d965edd3ade"} Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.692836 4932 scope.go:117] "RemoveContainer" containerID="7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.697879 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fw92p" event={"ID":"887a5714-e658-4d6e-af56-151e97154570","Type":"ContainerDied","Data":"d7b9cb7df59e006921be57f4fa00a202a0916dc237bd5aa5e01a45f0cc1832e8"} Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.697997 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fw92p" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.721020 4932 scope.go:117] "RemoveContainer" containerID="d61db3786beae778eba89a42860f503e7d55d4caddeb68ffcd237dfacecbb5ec" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.727281 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jk29v"] Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.735573 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jk29v"] Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.746639 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fw92p"] Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.754670 4932 scope.go:117] "RemoveContainer" containerID="cc0fd8c4cfed7a76fd2b18a0f168dd9c6dd5d3dbf65c49caa23a0c52514298f2" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.754938 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fw92p"] Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.783006 4932 scope.go:117] "RemoveContainer" containerID="7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32" Nov 25 09:17:16 crc kubenswrapper[4932]: E1125 09:17:16.783446 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32\": container with ID starting with 7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32 not found: ID does not exist" containerID="7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.783482 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32"} err="failed to get container status \"7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32\": rpc error: code = NotFound desc = could not find container \"7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32\": container with ID starting with 7e2bb79f43e1bb78eba1d1330ad51e0b1727e684f251a19c44b669d52f9c7d32 not found: ID does not exist" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.783506 4932 scope.go:117] "RemoveContainer" containerID="d61db3786beae778eba89a42860f503e7d55d4caddeb68ffcd237dfacecbb5ec" Nov 25 09:17:16 crc kubenswrapper[4932]: E1125 09:17:16.783795 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d61db3786beae778eba89a42860f503e7d55d4caddeb68ffcd237dfacecbb5ec\": container with ID starting with d61db3786beae778eba89a42860f503e7d55d4caddeb68ffcd237dfacecbb5ec not found: ID does not exist" containerID="d61db3786beae778eba89a42860f503e7d55d4caddeb68ffcd237dfacecbb5ec" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.783839 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d61db3786beae778eba89a42860f503e7d55d4caddeb68ffcd237dfacecbb5ec"} err="failed to get container status \"d61db3786beae778eba89a42860f503e7d55d4caddeb68ffcd237dfacecbb5ec\": rpc error: code = NotFound desc = could not find container \"d61db3786beae778eba89a42860f503e7d55d4caddeb68ffcd237dfacecbb5ec\": container with ID starting with 
d61db3786beae778eba89a42860f503e7d55d4caddeb68ffcd237dfacecbb5ec not found: ID does not exist" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.783859 4932 scope.go:117] "RemoveContainer" containerID="cc0fd8c4cfed7a76fd2b18a0f168dd9c6dd5d3dbf65c49caa23a0c52514298f2" Nov 25 09:17:16 crc kubenswrapper[4932]: E1125 09:17:16.784098 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc0fd8c4cfed7a76fd2b18a0f168dd9c6dd5d3dbf65c49caa23a0c52514298f2\": container with ID starting with cc0fd8c4cfed7a76fd2b18a0f168dd9c6dd5d3dbf65c49caa23a0c52514298f2 not found: ID does not exist" containerID="cc0fd8c4cfed7a76fd2b18a0f168dd9c6dd5d3dbf65c49caa23a0c52514298f2" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.784133 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc0fd8c4cfed7a76fd2b18a0f168dd9c6dd5d3dbf65c49caa23a0c52514298f2"} err="failed to get container status \"cc0fd8c4cfed7a76fd2b18a0f168dd9c6dd5d3dbf65c49caa23a0c52514298f2\": rpc error: code = NotFound desc = could not find container \"cc0fd8c4cfed7a76fd2b18a0f168dd9c6dd5d3dbf65c49caa23a0c52514298f2\": container with ID starting with cc0fd8c4cfed7a76fd2b18a0f168dd9c6dd5d3dbf65c49caa23a0c52514298f2 not found: ID does not exist" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.784149 4932 scope.go:117] "RemoveContainer" containerID="fe658c022384004d6365621a406d8b5245d9a97c397a83fbe60d8485417582de" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.814279 4932 scope.go:117] "RemoveContainer" containerID="932c0eca1dc837e53c44a8ded75975f5f78a266b06527001d028da3c58d7fa46" Nov 25 09:17:16 crc kubenswrapper[4932]: I1125 09:17:16.838219 4932 scope.go:117] "RemoveContainer" containerID="ff6e46ed675d27273dde91a5efc502d4180d34d5bf8942d49d2075e73a9afabe" Nov 25 09:17:18 crc kubenswrapper[4932]: I1125 09:17:18.621780 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="887a5714-e658-4d6e-af56-151e97154570" path="/var/lib/kubelet/pods/887a5714-e658-4d6e-af56-151e97154570/volumes" Nov 25 09:17:18 crc kubenswrapper[4932]: I1125 09:17:18.623629 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d906346e-51e1-4db9-ae2e-f252dc08e93a" path="/var/lib/kubelet/pods/d906346e-51e1-4db9-ae2e-f252dc08e93a/volumes" Nov 25 09:17:22 crc kubenswrapper[4932]: I1125 09:17:22.606990 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:17:22 crc kubenswrapper[4932]: E1125 09:17:22.608041 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:17:36 crc kubenswrapper[4932]: I1125 09:17:36.606456 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:17:36 crc kubenswrapper[4932]: E1125 09:17:36.607621 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
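The "ContainerStatus from runtime service failed ... code = NotFound" errors above are benign: the kubelet asks the runtime to delete containers that were already removed, logs the NotFound, and moves on, since absence is the desired end state. A sketch of that idempotent-delete pattern follows; the runtime interface and fakeRuntime type here are hypothetical stand-ins, not kubelet's real CRI client, though the gRPC status/codes packages are the real ones its errors come from.

// cleanup.go - sketch of treating NotFound as success when removing a
// container, mirroring the tolerated errors above. fakeRuntime and the
// anonymous interface are illustrative assumptions.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type fakeRuntime struct{}

func (fakeRuntime) RemoveContainer(id string) error {
	return status.Error(codes.NotFound, "could not find container "+id)
}

func removeIfPresent(r interface{ RemoveContainer(string) error }, id string) error {
	err := r.RemoveContainer(id)
	if status.Code(err) == codes.NotFound {
		fmt.Println("already gone:", id) // nothing left to remove
		return nil
	}
	return err
}

func main() {
	_ = removeIfPresent(fakeRuntime{}, "7e2bb79f")
}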
pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:17:50 crc kubenswrapper[4932]: I1125 09:17:50.614110 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"
Nov 25 09:17:50 crc kubenswrapper[4932]: E1125 09:17:50.615304 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:18:01 crc kubenswrapper[4932]: I1125 09:18:01.606565 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"
Nov 25 09:18:01 crc kubenswrapper[4932]: E1125 09:18:01.607574 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:18:06 crc kubenswrapper[4932]: I1125 09:18:06.666556 4932 scope.go:117] "RemoveContainer" containerID="747180a5ae1996b3690d4c5f6d543d60fd5f0c5f6b4280f2dca841186ba94062"
Nov 25 09:18:06 crc kubenswrapper[4932]: I1125 09:18:06.700834 4932 scope.go:117] "RemoveContainer" containerID="3445d785af0441b8b567d57c03e6835b2bfed12872a3896dad8940d2a438528d"
Nov 25 09:18:06 crc kubenswrapper[4932]: I1125 09:18:06.766041 4932 scope.go:117] "RemoveContainer" containerID="22766927fe02c87ef13b604aa23003e23b4c65e52b9aed4158cb19b4dd1cc14c"
Nov 25 09:18:06 crc kubenswrapper[4932]: I1125 09:18:06.796420 4932 scope.go:117] "RemoveContainer" containerID="756e2877e3a1a5bb86487a14267262b52d648ff3b7723eaf2254117cbadf4bb4"
Nov 25 09:18:06 crc kubenswrapper[4932]: I1125 09:18:06.863797 4932 scope.go:117] "RemoveContainer" containerID="21a5d0df1942575438c4ffeb87b83b3214bff47585c2d3fb6e8ea21f3a0abea3"
Nov 25 09:18:06 crc kubenswrapper[4932]: I1125 09:18:06.894458 4932 scope.go:117] "RemoveContainer" containerID="5a9af99fec1a9e336010120706f5205cb52b0a56fb0e496623d4cdfc6e5717cd"
Nov 25 09:18:06 crc kubenswrapper[4932]: I1125 09:18:06.918639 4932 scope.go:117] "RemoveContainer" containerID="94e26657b4459559c5225a5cdc43af92ec60a964e5e3ba7b4573d1374940e49a"
Nov 25 09:18:16 crc kubenswrapper[4932]: I1125 09:18:16.606325 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"
Nov 25 09:18:16 crc kubenswrapper[4932]: E1125 09:18:16.607638 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:18:31 crc kubenswrapper[4932]: I1125 09:18:31.606695 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"
Nov 25 09:18:31 crc kubenswrapper[4932]: E1125 09:18:31.607657 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:18:44 crc kubenswrapper[4932]: I1125 09:18:44.605606 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"
Nov 25 09:18:44 crc kubenswrapper[4932]: E1125 09:18:44.606327 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:18:57 crc kubenswrapper[4932]: I1125 09:18:57.605788 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"
Nov 25 09:18:57 crc kubenswrapper[4932]: E1125 09:18:57.606687 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:19:07 crc kubenswrapper[4932]: I1125 09:19:07.072833 4932 scope.go:117] "RemoveContainer" containerID="9be820186e32cbebad327f145eb2403d74b609391365b492967c9a6948bbcbe5"
Nov 25 09:19:07 crc kubenswrapper[4932]: I1125 09:19:07.134488 4932 scope.go:117] "RemoveContainer" containerID="7d27611ad3f8e0e548937326ec5872d5fd17ef030c916731538091ee33f8c092"
Nov 25 09:19:07 crc kubenswrapper[4932]: I1125 09:19:07.158234 4932 scope.go:117] "RemoveContainer" containerID="4208013c150414e2c4e6a9db4af0a0ed4445d68f363fa9be56e13050961d4b79"
Nov 25 09:19:07 crc kubenswrapper[4932]: I1125 09:19:07.185093 4932 scope.go:117] "RemoveContainer" containerID="7247d6a20300098ab3cb5a4ccdeaecb8b01f9585ec29af77a8b23a178fb313d8"
Nov 25 09:19:07 crc kubenswrapper[4932]: I1125 09:19:07.213742 4932 scope.go:117] "RemoveContainer" containerID="ef08398ee58bcf8e60c93b50283ca6afdcbca3b7a33b1eac1c91a89fe5b90230"
Nov 25 09:19:07 crc kubenswrapper[4932]: I1125 09:19:07.233538 4932 scope.go:117] "RemoveContainer" containerID="83bb76627ac457d2fce7c0ce9e6259515a96f4e8e6bad2fb47530ca487cba1e3"
Nov 25 09:19:10 crc kubenswrapper[4932]: I1125 09:19:10.617332 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"
Nov 25 09:19:10 crc kubenswrapper[4932]: E1125 09:19:10.618122 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:19:23 crc kubenswrapper[4932]: I1125 09:19:23.606521 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"
Nov 25 09:19:23 crc kubenswrapper[4932]: E1125 09:19:23.607204 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:19:34 crc kubenswrapper[4932]: I1125 09:19:34.606537 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"
Nov 25 09:19:34 crc kubenswrapper[4932]: E1125 09:19:34.607663 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:19:48 crc kubenswrapper[4932]: I1125 09:19:48.606753 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"
Nov 25 09:19:49 crc kubenswrapper[4932]: I1125 09:19:49.101461 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"4486ae0cee056a8a5f14e5839d7ae36c0193b71d3f17cfedc39f667d5b49a195"}
Nov 25 09:20:07 crc kubenswrapper[4932]: I1125 09:20:07.312031 4932 scope.go:117] "RemoveContainer" containerID="86482ae8db87016684a457f31b6613af3655be261977713614c37a0b9fd0465f"
Nov 25 09:20:07 crc kubenswrapper[4932]: I1125 09:20:07.341162 4932 scope.go:117] "RemoveContainer" containerID="b3077aa432072fd3e5326ed9fb2a90716d917a8e76bd378c56a62bc655717477"
Nov 25 09:20:07 crc kubenswrapper[4932]: I1125 09:20:07.368578 4932 scope.go:117] "RemoveContainer" containerID="c17d2b7a60e488f01f5b61b845be2ae08ecd0dcb78cfdb75a8b72ffb6d34fa27"
Nov 25 09:20:07 crc kubenswrapper[4932]: I1125 09:20:07.387399 4932 scope.go:117] "RemoveContainer" containerID="bdf7364e9fd604703103c20638764530f1a8592227c9d8357f0450708d123579"
Nov 25 09:22:07 crc kubenswrapper[4932]: I1125 09:22:07.181835 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:22:07 crc kubenswrapper[4932]: I1125 09:22:07.182581 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
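The "back-off 5m0s" entries above come from the kubelet's per-container restart back-off, which roughly doubles the delay after each failed start until it hits a cap; once the cap is reached, every retry reports the same 5m0s figure. A minimal sketch of that schedule, not kubelet's actual code: the 10s initial delay is an assumption, the 5m cap matches the log.

// backoff_sketch.go: illustrative doubling back-off with a 5m cap.
package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 10 * time.Second    // assumed initial delay
	maxDelay := 5 * time.Minute  // cap seen in the log ("back-off 5m0s")
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("restart %d: back-off %v\n", attempt, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay // from here on, every retry waits the full 5m0s
		}
	}
}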
Nov 25 09:22:37 crc kubenswrapper[4932]: I1125 09:22:37.181878 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:22:37 crc kubenswrapper[4932]: I1125 09:22:37.182789 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:23:07 crc kubenswrapper[4932]: I1125 09:23:07.180711 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:23:07 crc kubenswrapper[4932]: I1125 09:23:07.181570 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:23:07 crc kubenswrapper[4932]: I1125 09:23:07.181635 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh"
Nov 25 09:23:07 crc kubenswrapper[4932]: I1125 09:23:07.182592 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4486ae0cee056a8a5f14e5839d7ae36c0193b71d3f17cfedc39f667d5b49a195"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 09:23:07 crc kubenswrapper[4932]: I1125 09:23:07.182781 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://4486ae0cee056a8a5f14e5839d7ae36c0193b71d3f17cfedc39f667d5b49a195" gracePeriod=600
Nov 25 09:23:08 crc kubenswrapper[4932]: I1125 09:23:08.195258 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="4486ae0cee056a8a5f14e5839d7ae36c0193b71d3f17cfedc39f667d5b49a195" exitCode=0
Nov 25 09:23:08 crc kubenswrapper[4932]: I1125 09:23:08.195327 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"4486ae0cee056a8a5f14e5839d7ae36c0193b71d3f17cfedc39f667d5b49a195"}
Nov 25 09:23:08 crc kubenswrapper[4932]: I1125 09:23:08.195805 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13"}
Nov 25 09:23:08 crc kubenswrapper[4932]: I1125 09:23:08.195828 4932 scope.go:117] "RemoveContainer" containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d"
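The liveness probe that drives the restart above polls http://127.0.0.1:8798/health; "connection refused" means nothing was listening on port 8798 at all, not that a handler returned an error status. A minimal sketch of such an endpoint: the port and path come from the log, the handler body is an assumption.

// health_sketch.go: the kind of listener the kubelet probe above expects.
package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		// If this process is down, the probe sees "connect: connection refused";
		// if it is up but unhealthy, it could return a non-200 instead.
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("ok"))
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8798", nil))
}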
containerID="8f436dcb16e7e842d41bddab32ee2932459d3c758fef975ea958e2d60c50d89d" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.025278 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bzkv7"] Nov 25 09:24:19 crc kubenswrapper[4932]: E1125 09:24:19.026083 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="887a5714-e658-4d6e-af56-151e97154570" containerName="extract-utilities" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.026096 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="887a5714-e658-4d6e-af56-151e97154570" containerName="extract-utilities" Nov 25 09:24:19 crc kubenswrapper[4932]: E1125 09:24:19.026112 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d906346e-51e1-4db9-ae2e-f252dc08e93a" containerName="extract-utilities" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.026118 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d906346e-51e1-4db9-ae2e-f252dc08e93a" containerName="extract-utilities" Nov 25 09:24:19 crc kubenswrapper[4932]: E1125 09:24:19.026134 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d906346e-51e1-4db9-ae2e-f252dc08e93a" containerName="registry-server" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.026143 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d906346e-51e1-4db9-ae2e-f252dc08e93a" containerName="registry-server" Nov 25 09:24:19 crc kubenswrapper[4932]: E1125 09:24:19.026151 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="887a5714-e658-4d6e-af56-151e97154570" containerName="extract-content" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.026157 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="887a5714-e658-4d6e-af56-151e97154570" containerName="extract-content" Nov 25 09:24:19 crc kubenswrapper[4932]: E1125 09:24:19.026170 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="887a5714-e658-4d6e-af56-151e97154570" containerName="registry-server" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.026176 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="887a5714-e658-4d6e-af56-151e97154570" containerName="registry-server" Nov 25 09:24:19 crc kubenswrapper[4932]: E1125 09:24:19.026188 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d906346e-51e1-4db9-ae2e-f252dc08e93a" containerName="extract-content" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.026194 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d906346e-51e1-4db9-ae2e-f252dc08e93a" containerName="extract-content" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.026335 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d906346e-51e1-4db9-ae2e-f252dc08e93a" containerName="registry-server" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.026349 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="887a5714-e658-4d6e-af56-151e97154570" containerName="registry-server" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.027587 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bzkv7" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.049920 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bzkv7"] Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.107777 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aae09acd-5db8-464e-81d2-d7473a02ffa2-utilities\") pod \"redhat-operators-bzkv7\" (UID: \"aae09acd-5db8-464e-81d2-d7473a02ffa2\") " pod="openshift-marketplace/redhat-operators-bzkv7" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.107833 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7md2\" (UniqueName: \"kubernetes.io/projected/aae09acd-5db8-464e-81d2-d7473a02ffa2-kube-api-access-x7md2\") pod \"redhat-operators-bzkv7\" (UID: \"aae09acd-5db8-464e-81d2-d7473a02ffa2\") " pod="openshift-marketplace/redhat-operators-bzkv7" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.107856 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aae09acd-5db8-464e-81d2-d7473a02ffa2-catalog-content\") pod \"redhat-operators-bzkv7\" (UID: \"aae09acd-5db8-464e-81d2-d7473a02ffa2\") " pod="openshift-marketplace/redhat-operators-bzkv7" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.209416 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aae09acd-5db8-464e-81d2-d7473a02ffa2-utilities\") pod \"redhat-operators-bzkv7\" (UID: \"aae09acd-5db8-464e-81d2-d7473a02ffa2\") " pod="openshift-marketplace/redhat-operators-bzkv7" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.209493 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7md2\" (UniqueName: \"kubernetes.io/projected/aae09acd-5db8-464e-81d2-d7473a02ffa2-kube-api-access-x7md2\") pod \"redhat-operators-bzkv7\" (UID: \"aae09acd-5db8-464e-81d2-d7473a02ffa2\") " pod="openshift-marketplace/redhat-operators-bzkv7" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.209522 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aae09acd-5db8-464e-81d2-d7473a02ffa2-catalog-content\") pod \"redhat-operators-bzkv7\" (UID: \"aae09acd-5db8-464e-81d2-d7473a02ffa2\") " pod="openshift-marketplace/redhat-operators-bzkv7" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.210087 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aae09acd-5db8-464e-81d2-d7473a02ffa2-catalog-content\") pod \"redhat-operators-bzkv7\" (UID: \"aae09acd-5db8-464e-81d2-d7473a02ffa2\") " pod="openshift-marketplace/redhat-operators-bzkv7" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.210454 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aae09acd-5db8-464e-81d2-d7473a02ffa2-utilities\") pod \"redhat-operators-bzkv7\" (UID: \"aae09acd-5db8-464e-81d2-d7473a02ffa2\") " pod="openshift-marketplace/redhat-operators-bzkv7" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.230945 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-x7md2\" (UniqueName: \"kubernetes.io/projected/aae09acd-5db8-464e-81d2-d7473a02ffa2-kube-api-access-x7md2\") pod \"redhat-operators-bzkv7\" (UID: \"aae09acd-5db8-464e-81d2-d7473a02ffa2\") " pod="openshift-marketplace/redhat-operators-bzkv7" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.351828 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bzkv7" Nov 25 09:24:19 crc kubenswrapper[4932]: I1125 09:24:19.784370 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bzkv7"] Nov 25 09:24:20 crc kubenswrapper[4932]: I1125 09:24:20.763177 4932 generic.go:334] "Generic (PLEG): container finished" podID="aae09acd-5db8-464e-81d2-d7473a02ffa2" containerID="830fe96a57bff205c4fdd22512af1fc4f3c6ac9b66e446362b64255c254f91df" exitCode=0 Nov 25 09:24:20 crc kubenswrapper[4932]: I1125 09:24:20.763237 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzkv7" event={"ID":"aae09acd-5db8-464e-81d2-d7473a02ffa2","Type":"ContainerDied","Data":"830fe96a57bff205c4fdd22512af1fc4f3c6ac9b66e446362b64255c254f91df"} Nov 25 09:24:20 crc kubenswrapper[4932]: I1125 09:24:20.763261 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzkv7" event={"ID":"aae09acd-5db8-464e-81d2-d7473a02ffa2","Type":"ContainerStarted","Data":"1b48a379b26cb57222a5c6b52af7f42cd3c83ef725550ecac5dc15f3ff4173dd"} Nov 25 09:24:20 crc kubenswrapper[4932]: I1125 09:24:20.765795 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:24:21 crc kubenswrapper[4932]: I1125 09:24:21.773651 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzkv7" event={"ID":"aae09acd-5db8-464e-81d2-d7473a02ffa2","Type":"ContainerStarted","Data":"c22833bcb3b27a2a8358f9fc84d3af59acf3626e159cd270cf2912f9bd32cd30"} Nov 25 09:24:22 crc kubenswrapper[4932]: I1125 09:24:22.790578 4932 generic.go:334] "Generic (PLEG): container finished" podID="aae09acd-5db8-464e-81d2-d7473a02ffa2" containerID="c22833bcb3b27a2a8358f9fc84d3af59acf3626e159cd270cf2912f9bd32cd30" exitCode=0 Nov 25 09:24:22 crc kubenswrapper[4932]: I1125 09:24:22.790767 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzkv7" event={"ID":"aae09acd-5db8-464e-81d2-d7473a02ffa2","Type":"ContainerDied","Data":"c22833bcb3b27a2a8358f9fc84d3af59acf3626e159cd270cf2912f9bd32cd30"} Nov 25 09:24:24 crc kubenswrapper[4932]: I1125 09:24:24.808696 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzkv7" event={"ID":"aae09acd-5db8-464e-81d2-d7473a02ffa2","Type":"ContainerStarted","Data":"d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b"} Nov 25 09:24:24 crc kubenswrapper[4932]: I1125 09:24:24.830099 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bzkv7" podStartSLOduration=2.947291161 podStartE2EDuration="5.830064592s" podCreationTimestamp="2025-11-25 09:24:19 +0000 UTC" firstStartedPulling="2025-11-25 09:24:20.765372328 +0000 UTC m=+2120.891401891" lastFinishedPulling="2025-11-25 09:24:23.648145719 +0000 UTC m=+2123.774175322" observedRunningTime="2025-11-25 09:24:24.826344986 +0000 UTC m=+2124.952374559" watchObservedRunningTime="2025-11-25 09:24:24.830064592 +0000 UTC m=+2124.956094205" Nov 25 09:24:29 crc 
Nov 25 09:24:29 crc kubenswrapper[4932]: I1125 09:24:29.353004 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bzkv7"
Nov 25 09:24:29 crc kubenswrapper[4932]: I1125 09:24:29.353685 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bzkv7"
Nov 25 09:24:30 crc kubenswrapper[4932]: I1125 09:24:30.401458 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bzkv7" podUID="aae09acd-5db8-464e-81d2-d7473a02ffa2" containerName="registry-server" probeResult="failure" output=<
Nov 25 09:24:30 crc kubenswrapper[4932]: timeout: failed to connect service ":50051" within 1s
Nov 25 09:24:30 crc kubenswrapper[4932]: >
Nov 25 09:24:39 crc kubenswrapper[4932]: I1125 09:24:39.401646 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bzkv7"
Nov 25 09:24:39 crc kubenswrapper[4932]: I1125 09:24:39.462244 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bzkv7"
Nov 25 09:24:39 crc kubenswrapper[4932]: I1125 09:24:39.634580 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bzkv7"]
Nov 25 09:24:40 crc kubenswrapper[4932]: I1125 09:24:40.934827 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bzkv7" podUID="aae09acd-5db8-464e-81d2-d7473a02ffa2" containerName="registry-server" containerID="cri-o://d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b" gracePeriod=2
Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.346092 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bzkv7"
Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.459939 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7md2\" (UniqueName: \"kubernetes.io/projected/aae09acd-5db8-464e-81d2-d7473a02ffa2-kube-api-access-x7md2\") pod \"aae09acd-5db8-464e-81d2-d7473a02ffa2\" (UID: \"aae09acd-5db8-464e-81d2-d7473a02ffa2\") "
Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.460007 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aae09acd-5db8-464e-81d2-d7473a02ffa2-catalog-content\") pod \"aae09acd-5db8-464e-81d2-d7473a02ffa2\" (UID: \"aae09acd-5db8-464e-81d2-d7473a02ffa2\") "
Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.460091 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aae09acd-5db8-464e-81d2-d7473a02ffa2-utilities\") pod \"aae09acd-5db8-464e-81d2-d7473a02ffa2\" (UID: \"aae09acd-5db8-464e-81d2-d7473a02ffa2\") "
Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.461213 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aae09acd-5db8-464e-81d2-d7473a02ffa2-utilities" (OuterVolumeSpecName: "utilities") pod "aae09acd-5db8-464e-81d2-d7473a02ffa2" (UID: "aae09acd-5db8-464e-81d2-d7473a02ffa2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
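The startup-probe failure above reports that the registry-server's gRPC port could not be reached within one second. A stdlib sketch of an equivalent reachability check follows; the real probe is the catalog image's own binary, so this stand-in is an assumption for illustration only.

// probe_sketch.go: TCP reachability check with a 1s budget, mirroring
// the "timeout: failed to connect service \":50051\" within 1s" output.
package main

import (
	"fmt"
	"net"
	"os"
	"time"
)

func main() {
	conn, err := net.DialTimeout("tcp", "127.0.0.1:50051", 1*time.Second)
	if err != nil {
		fmt.Fprintf(os.Stderr, "timeout: failed to connect service %q within 1s\n", ":50051")
		os.Exit(1) // non-zero exit marks the probe attempt as failed
	}
	conn.Close()
	fmt.Println("ok")
}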
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.466090 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aae09acd-5db8-464e-81d2-d7473a02ffa2-kube-api-access-x7md2" (OuterVolumeSpecName: "kube-api-access-x7md2") pod "aae09acd-5db8-464e-81d2-d7473a02ffa2" (UID: "aae09acd-5db8-464e-81d2-d7473a02ffa2"). InnerVolumeSpecName "kube-api-access-x7md2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.556700 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aae09acd-5db8-464e-81d2-d7473a02ffa2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aae09acd-5db8-464e-81d2-d7473a02ffa2" (UID: "aae09acd-5db8-464e-81d2-d7473a02ffa2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.561645 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7md2\" (UniqueName: \"kubernetes.io/projected/aae09acd-5db8-464e-81d2-d7473a02ffa2-kube-api-access-x7md2\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.561719 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aae09acd-5db8-464e-81d2-d7473a02ffa2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.561732 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aae09acd-5db8-464e-81d2-d7473a02ffa2-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.943067 4932 generic.go:334] "Generic (PLEG): container finished" podID="aae09acd-5db8-464e-81d2-d7473a02ffa2" containerID="d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b" exitCode=0 Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.943122 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzkv7" event={"ID":"aae09acd-5db8-464e-81d2-d7473a02ffa2","Type":"ContainerDied","Data":"d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b"} Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.943159 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bzkv7" event={"ID":"aae09acd-5db8-464e-81d2-d7473a02ffa2","Type":"ContainerDied","Data":"1b48a379b26cb57222a5c6b52af7f42cd3c83ef725550ecac5dc15f3ff4173dd"} Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.943177 4932 scope.go:117] "RemoveContainer" containerID="d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b" Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.943172 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-bzkv7" Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.963475 4932 scope.go:117] "RemoveContainer" containerID="c22833bcb3b27a2a8358f9fc84d3af59acf3626e159cd270cf2912f9bd32cd30" Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.984581 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bzkv7"] Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.984665 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bzkv7"] Nov 25 09:24:41 crc kubenswrapper[4932]: I1125 09:24:41.996628 4932 scope.go:117] "RemoveContainer" containerID="830fe96a57bff205c4fdd22512af1fc4f3c6ac9b66e446362b64255c254f91df" Nov 25 09:24:42 crc kubenswrapper[4932]: I1125 09:24:42.013894 4932 scope.go:117] "RemoveContainer" containerID="d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b" Nov 25 09:24:42 crc kubenswrapper[4932]: E1125 09:24:42.014383 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b\": container with ID starting with d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b not found: ID does not exist" containerID="d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b" Nov 25 09:24:42 crc kubenswrapper[4932]: I1125 09:24:42.014479 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b"} err="failed to get container status \"d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b\": rpc error: code = NotFound desc = could not find container \"d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b\": container with ID starting with d3459a7fb98a8b7c997df42890a760e7aa254618087101b13200940eb7f0104b not found: ID does not exist" Nov 25 09:24:42 crc kubenswrapper[4932]: I1125 09:24:42.014558 4932 scope.go:117] "RemoveContainer" containerID="c22833bcb3b27a2a8358f9fc84d3af59acf3626e159cd270cf2912f9bd32cd30" Nov 25 09:24:42 crc kubenswrapper[4932]: E1125 09:24:42.015068 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c22833bcb3b27a2a8358f9fc84d3af59acf3626e159cd270cf2912f9bd32cd30\": container with ID starting with c22833bcb3b27a2a8358f9fc84d3af59acf3626e159cd270cf2912f9bd32cd30 not found: ID does not exist" containerID="c22833bcb3b27a2a8358f9fc84d3af59acf3626e159cd270cf2912f9bd32cd30" Nov 25 09:24:42 crc kubenswrapper[4932]: I1125 09:24:42.015103 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c22833bcb3b27a2a8358f9fc84d3af59acf3626e159cd270cf2912f9bd32cd30"} err="failed to get container status \"c22833bcb3b27a2a8358f9fc84d3af59acf3626e159cd270cf2912f9bd32cd30\": rpc error: code = NotFound desc = could not find container \"c22833bcb3b27a2a8358f9fc84d3af59acf3626e159cd270cf2912f9bd32cd30\": container with ID starting with c22833bcb3b27a2a8358f9fc84d3af59acf3626e159cd270cf2912f9bd32cd30 not found: ID does not exist" Nov 25 09:24:42 crc kubenswrapper[4932]: I1125 09:24:42.015149 4932 scope.go:117] "RemoveContainer" containerID="830fe96a57bff205c4fdd22512af1fc4f3c6ac9b66e446362b64255c254f91df" Nov 25 09:24:42 crc kubenswrapper[4932]: E1125 09:24:42.015554 4932 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"830fe96a57bff205c4fdd22512af1fc4f3c6ac9b66e446362b64255c254f91df\": container with ID starting with 830fe96a57bff205c4fdd22512af1fc4f3c6ac9b66e446362b64255c254f91df not found: ID does not exist" containerID="830fe96a57bff205c4fdd22512af1fc4f3c6ac9b66e446362b64255c254f91df" Nov 25 09:24:42 crc kubenswrapper[4932]: I1125 09:24:42.015632 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"830fe96a57bff205c4fdd22512af1fc4f3c6ac9b66e446362b64255c254f91df"} err="failed to get container status \"830fe96a57bff205c4fdd22512af1fc4f3c6ac9b66e446362b64255c254f91df\": rpc error: code = NotFound desc = could not find container \"830fe96a57bff205c4fdd22512af1fc4f3c6ac9b66e446362b64255c254f91df\": container with ID starting with 830fe96a57bff205c4fdd22512af1fc4f3c6ac9b66e446362b64255c254f91df not found: ID does not exist" Nov 25 09:24:42 crc kubenswrapper[4932]: I1125 09:24:42.613610 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aae09acd-5db8-464e-81d2-d7473a02ffa2" path="/var/lib/kubelet/pods/aae09acd-5db8-464e-81d2-d7473a02ffa2/volumes" Nov 25 09:25:07 crc kubenswrapper[4932]: I1125 09:25:07.181043 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:25:07 crc kubenswrapper[4932]: I1125 09:25:07.181607 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:25:37 crc kubenswrapper[4932]: I1125 09:25:37.181597 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:25:37 crc kubenswrapper[4932]: I1125 09:25:37.181988 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:26:07 crc kubenswrapper[4932]: I1125 09:26:07.181506 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:26:07 crc kubenswrapper[4932]: I1125 09:26:07.182322 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:26:07 crc kubenswrapper[4932]: I1125 09:26:07.182390 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 09:26:07 crc kubenswrapper[4932]: I1125 09:26:07.183112 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:26:07 crc kubenswrapper[4932]: I1125 09:26:07.183219 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" gracePeriod=600 Nov 25 09:26:07 crc kubenswrapper[4932]: I1125 09:26:07.688067 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" exitCode=0 Nov 25 09:26:07 crc kubenswrapper[4932]: I1125 09:26:07.688119 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13"} Nov 25 09:26:07 crc kubenswrapper[4932]: I1125 09:26:07.688212 4932 scope.go:117] "RemoveContainer" containerID="4486ae0cee056a8a5f14e5839d7ae36c0193b71d3f17cfedc39f667d5b49a195" Nov 25 09:26:08 crc kubenswrapper[4932]: E1125 09:26:08.037437 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:26:08 crc kubenswrapper[4932]: I1125 09:26:08.697983 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:26:08 crc kubenswrapper[4932]: E1125 09:26:08.698553 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:26:23 crc kubenswrapper[4932]: I1125 09:26:23.606502 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:26:23 crc kubenswrapper[4932]: E1125 09:26:23.607725 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:26:34 crc 
kubenswrapper[4932]: I1125 09:26:34.606453 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:26:34 crc kubenswrapper[4932]: E1125 09:26:34.608578 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:26:45 crc kubenswrapper[4932]: I1125 09:26:45.606113 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:26:45 crc kubenswrapper[4932]: E1125 09:26:45.607123 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:26:57 crc kubenswrapper[4932]: I1125 09:26:57.606375 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:26:57 crc kubenswrapper[4932]: E1125 09:26:57.607246 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.771078 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4xbgj"] Nov 25 09:27:09 crc kubenswrapper[4932]: E1125 09:27:09.772451 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aae09acd-5db8-464e-81d2-d7473a02ffa2" containerName="extract-content" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.772476 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="aae09acd-5db8-464e-81d2-d7473a02ffa2" containerName="extract-content" Nov 25 09:27:09 crc kubenswrapper[4932]: E1125 09:27:09.772513 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aae09acd-5db8-464e-81d2-d7473a02ffa2" containerName="registry-server" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.772523 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="aae09acd-5db8-464e-81d2-d7473a02ffa2" containerName="registry-server" Nov 25 09:27:09 crc kubenswrapper[4932]: E1125 09:27:09.772549 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aae09acd-5db8-464e-81d2-d7473a02ffa2" containerName="extract-utilities" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.772562 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="aae09acd-5db8-464e-81d2-d7473a02ffa2" containerName="extract-utilities" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.772827 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="aae09acd-5db8-464e-81d2-d7473a02ffa2" 
containerName="registry-server" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.774854 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.793521 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4xbgj"] Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.869877 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbj4c\" (UniqueName: \"kubernetes.io/projected/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-kube-api-access-hbj4c\") pod \"community-operators-4xbgj\" (UID: \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\") " pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.869942 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-catalog-content\") pod \"community-operators-4xbgj\" (UID: \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\") " pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.870023 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-utilities\") pod \"community-operators-4xbgj\" (UID: \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\") " pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.971666 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbj4c\" (UniqueName: \"kubernetes.io/projected/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-kube-api-access-hbj4c\") pod \"community-operators-4xbgj\" (UID: \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\") " pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.971726 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-catalog-content\") pod \"community-operators-4xbgj\" (UID: \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\") " pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.971939 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-utilities\") pod \"community-operators-4xbgj\" (UID: \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\") " pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.972423 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-catalog-content\") pod \"community-operators-4xbgj\" (UID: \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\") " pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:09 crc kubenswrapper[4932]: I1125 09:27:09.972469 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-utilities\") pod \"community-operators-4xbgj\" (UID: \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\") " 
pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:10 crc kubenswrapper[4932]: I1125 09:27:09.999093 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbj4c\" (UniqueName: \"kubernetes.io/projected/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-kube-api-access-hbj4c\") pod \"community-operators-4xbgj\" (UID: \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\") " pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:10 crc kubenswrapper[4932]: I1125 09:27:10.095119 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:10 crc kubenswrapper[4932]: I1125 09:27:10.602416 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4xbgj"] Nov 25 09:27:10 crc kubenswrapper[4932]: I1125 09:27:10.615425 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:27:10 crc kubenswrapper[4932]: E1125 09:27:10.615946 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:27:11 crc kubenswrapper[4932]: I1125 09:27:11.213767 4932 generic.go:334] "Generic (PLEG): container finished" podID="be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" containerID="942d899f268e4aa210b650b9c50b11ed953c49884bad41c27a7c529c77b49b2f" exitCode=0 Nov 25 09:27:11 crc kubenswrapper[4932]: I1125 09:27:11.213827 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xbgj" event={"ID":"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6","Type":"ContainerDied","Data":"942d899f268e4aa210b650b9c50b11ed953c49884bad41c27a7c529c77b49b2f"} Nov 25 09:27:11 crc kubenswrapper[4932]: I1125 09:27:11.213885 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xbgj" event={"ID":"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6","Type":"ContainerStarted","Data":"fa2bd6849c7a0fdaf8409016cce8dbd4ad9d18a7e6267dba6b5d0735d115ea02"} Nov 25 09:27:14 crc kubenswrapper[4932]: I1125 09:27:14.241058 4932 generic.go:334] "Generic (PLEG): container finished" podID="be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" containerID="32e4339bdf69e76f924d905e2010c360042c0c25dc71d7f2bcf755c6dfde172c" exitCode=0 Nov 25 09:27:14 crc kubenswrapper[4932]: I1125 09:27:14.241126 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xbgj" event={"ID":"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6","Type":"ContainerDied","Data":"32e4339bdf69e76f924d905e2010c360042c0c25dc71d7f2bcf755c6dfde172c"} Nov 25 09:27:16 crc kubenswrapper[4932]: I1125 09:27:16.259733 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xbgj" event={"ID":"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6","Type":"ContainerStarted","Data":"30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0"} Nov 25 09:27:16 crc kubenswrapper[4932]: I1125 09:27:16.285668 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4xbgj" podStartSLOduration=2.90173562 podStartE2EDuration="7.2856368s" 
podCreationTimestamp="2025-11-25 09:27:09 +0000 UTC" firstStartedPulling="2025-11-25 09:27:11.216031779 +0000 UTC m=+2291.342061352" lastFinishedPulling="2025-11-25 09:27:15.599932969 +0000 UTC m=+2295.725962532" observedRunningTime="2025-11-25 09:27:16.279730701 +0000 UTC m=+2296.405760264" watchObservedRunningTime="2025-11-25 09:27:16.2856368 +0000 UTC m=+2296.411666383" Nov 25 09:27:20 crc kubenswrapper[4932]: I1125 09:27:20.095303 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:20 crc kubenswrapper[4932]: I1125 09:27:20.095717 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:20 crc kubenswrapper[4932]: I1125 09:27:20.138229 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:21 crc kubenswrapper[4932]: I1125 09:27:21.340292 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:21 crc kubenswrapper[4932]: I1125 09:27:21.387528 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4xbgj"] Nov 25 09:27:21 crc kubenswrapper[4932]: I1125 09:27:21.606323 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:27:21 crc kubenswrapper[4932]: E1125 09:27:21.606822 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:27:23 crc kubenswrapper[4932]: I1125 09:27:23.312616 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4xbgj" podUID="be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" containerName="registry-server" containerID="cri-o://30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0" gracePeriod=2 Nov 25 09:27:23 crc kubenswrapper[4932]: I1125 09:27:23.902925 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.024119 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-utilities\") pod \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\" (UID: \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\") " Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.024517 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hbj4c\" (UniqueName: \"kubernetes.io/projected/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-kube-api-access-hbj4c\") pod \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\" (UID: \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\") " Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.024576 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-catalog-content\") pod \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\" (UID: \"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6\") " Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.025365 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-utilities" (OuterVolumeSpecName: "utilities") pod "be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" (UID: "be3c5e2f-4af3-42bc-9083-ccdfe949dfa6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.029933 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-kube-api-access-hbj4c" (OuterVolumeSpecName: "kube-api-access-hbj4c") pod "be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" (UID: "be3c5e2f-4af3-42bc-9083-ccdfe949dfa6"). InnerVolumeSpecName "kube-api-access-hbj4c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.125924 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hbj4c\" (UniqueName: \"kubernetes.io/projected/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-kube-api-access-hbj4c\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.125966 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.326234 4932 generic.go:334] "Generic (PLEG): container finished" podID="be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" containerID="30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0" exitCode=0 Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.326305 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xbgj" event={"ID":"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6","Type":"ContainerDied","Data":"30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0"} Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.326357 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4xbgj" event={"ID":"be3c5e2f-4af3-42bc-9083-ccdfe949dfa6","Type":"ContainerDied","Data":"fa2bd6849c7a0fdaf8409016cce8dbd4ad9d18a7e6267dba6b5d0735d115ea02"} Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.326389 4932 scope.go:117] "RemoveContainer" containerID="30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.326389 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4xbgj" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.350961 4932 scope.go:117] "RemoveContainer" containerID="32e4339bdf69e76f924d905e2010c360042c0c25dc71d7f2bcf755c6dfde172c" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.375583 4932 scope.go:117] "RemoveContainer" containerID="942d899f268e4aa210b650b9c50b11ed953c49884bad41c27a7c529c77b49b2f" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.394986 4932 scope.go:117] "RemoveContainer" containerID="30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0" Nov 25 09:27:24 crc kubenswrapper[4932]: E1125 09:27:24.395550 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0\": container with ID starting with 30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0 not found: ID does not exist" containerID="30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.395609 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0"} err="failed to get container status \"30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0\": rpc error: code = NotFound desc = could not find container \"30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0\": container with ID starting with 30e82ce3452eff08f8f7cad68336e97e4e9d31b0c15f2b25d31193975a92fca0 not found: ID does not exist" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.395645 4932 scope.go:117] "RemoveContainer" containerID="32e4339bdf69e76f924d905e2010c360042c0c25dc71d7f2bcf755c6dfde172c" Nov 25 09:27:24 crc kubenswrapper[4932]: E1125 09:27:24.395927 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32e4339bdf69e76f924d905e2010c360042c0c25dc71d7f2bcf755c6dfde172c\": container with ID starting with 32e4339bdf69e76f924d905e2010c360042c0c25dc71d7f2bcf755c6dfde172c not found: ID does not exist" containerID="32e4339bdf69e76f924d905e2010c360042c0c25dc71d7f2bcf755c6dfde172c" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.395964 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32e4339bdf69e76f924d905e2010c360042c0c25dc71d7f2bcf755c6dfde172c"} err="failed to get container status \"32e4339bdf69e76f924d905e2010c360042c0c25dc71d7f2bcf755c6dfde172c\": rpc error: code = NotFound desc = could not find container \"32e4339bdf69e76f924d905e2010c360042c0c25dc71d7f2bcf755c6dfde172c\": container with ID starting with 32e4339bdf69e76f924d905e2010c360042c0c25dc71d7f2bcf755c6dfde172c not found: ID does not exist" Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.395979 4932 scope.go:117] "RemoveContainer" containerID="942d899f268e4aa210b650b9c50b11ed953c49884bad41c27a7c529c77b49b2f" Nov 25 09:27:24 crc kubenswrapper[4932]: E1125 09:27:24.396296 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"942d899f268e4aa210b650b9c50b11ed953c49884bad41c27a7c529c77b49b2f\": container with ID starting with 942d899f268e4aa210b650b9c50b11ed953c49884bad41c27a7c529c77b49b2f not found: ID does not exist" containerID="942d899f268e4aa210b650b9c50b11ed953c49884bad41c27a7c529c77b49b2f" 
Nov 25 09:27:24 crc kubenswrapper[4932]: I1125 09:27:24.396328 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"942d899f268e4aa210b650b9c50b11ed953c49884bad41c27a7c529c77b49b2f"} err="failed to get container status \"942d899f268e4aa210b650b9c50b11ed953c49884bad41c27a7c529c77b49b2f\": rpc error: code = NotFound desc = could not find container \"942d899f268e4aa210b650b9c50b11ed953c49884bad41c27a7c529c77b49b2f\": container with ID starting with 942d899f268e4aa210b650b9c50b11ed953c49884bad41c27a7c529c77b49b2f not found: ID does not exist" Nov 25 09:27:25 crc kubenswrapper[4932]: I1125 09:27:25.178825 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" (UID: "be3c5e2f-4af3-42bc-9083-ccdfe949dfa6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:27:25 crc kubenswrapper[4932]: I1125 09:27:25.244840 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:27:25 crc kubenswrapper[4932]: I1125 09:27:25.284493 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4xbgj"] Nov 25 09:27:25 crc kubenswrapper[4932]: I1125 09:27:25.296019 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4xbgj"] Nov 25 09:27:26 crc kubenswrapper[4932]: I1125 09:27:26.617460 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" path="/var/lib/kubelet/pods/be3c5e2f-4af3-42bc-9083-ccdfe949dfa6/volumes" Nov 25 09:27:34 crc kubenswrapper[4932]: I1125 09:27:34.606053 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:27:34 crc kubenswrapper[4932]: E1125 09:27:34.606901 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:27:46 crc kubenswrapper[4932]: I1125 09:27:46.606512 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:27:46 crc kubenswrapper[4932]: E1125 09:27:46.607628 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:28:01 crc kubenswrapper[4932]: I1125 09:28:01.605605 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:28:01 crc kubenswrapper[4932]: E1125 09:28:01.606359 4932 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:28:13 crc kubenswrapper[4932]: I1125 09:28:13.606627 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:28:13 crc kubenswrapper[4932]: E1125 09:28:13.607773 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:28:25 crc kubenswrapper[4932]: I1125 09:28:25.606348 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:28:25 crc kubenswrapper[4932]: E1125 09:28:25.607032 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:28:36 crc kubenswrapper[4932]: I1125 09:28:36.606782 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:28:36 crc kubenswrapper[4932]: E1125 09:28:36.608003 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:28:50 crc kubenswrapper[4932]: I1125 09:28:50.611360 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:28:50 crc kubenswrapper[4932]: E1125 09:28:50.612212 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:29:04 crc kubenswrapper[4932]: I1125 09:29:04.606330 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:29:04 crc kubenswrapper[4932]: E1125 09:29:04.607444 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:29:19 crc kubenswrapper[4932]: I1125 09:29:19.606133 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:29:19 crc kubenswrapper[4932]: E1125 09:29:19.606993 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:29:30 crc kubenswrapper[4932]: I1125 09:29:30.610675 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:29:30 crc kubenswrapper[4932]: E1125 09:29:30.611456 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.372500 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gxkwt"] Nov 25 09:29:39 crc kubenswrapper[4932]: E1125 09:29:39.373903 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" containerName="extract-utilities" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.373921 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" containerName="extract-utilities" Nov 25 09:29:39 crc kubenswrapper[4932]: E1125 09:29:39.373941 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" containerName="registry-server" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.373948 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" containerName="registry-server" Nov 25 09:29:39 crc kubenswrapper[4932]: E1125 09:29:39.373974 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" containerName="extract-content" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.373981 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" containerName="extract-content" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.374160 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="be3c5e2f-4af3-42bc-9083-ccdfe949dfa6" containerName="registry-server" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.375492 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.389039 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gxkwt"] Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.553773 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-catalog-content\") pod \"redhat-marketplace-gxkwt\" (UID: \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\") " pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.553888 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fw6z2\" (UniqueName: \"kubernetes.io/projected/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-kube-api-access-fw6z2\") pod \"redhat-marketplace-gxkwt\" (UID: \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\") " pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.553921 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-utilities\") pod \"redhat-marketplace-gxkwt\" (UID: \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\") " pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.654849 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fw6z2\" (UniqueName: \"kubernetes.io/projected/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-kube-api-access-fw6z2\") pod \"redhat-marketplace-gxkwt\" (UID: \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\") " pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.654916 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-utilities\") pod \"redhat-marketplace-gxkwt\" (UID: \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\") " pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.654997 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-catalog-content\") pod \"redhat-marketplace-gxkwt\" (UID: \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\") " pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.655496 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-catalog-content\") pod \"redhat-marketplace-gxkwt\" (UID: \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\") " pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.655662 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-utilities\") pod \"redhat-marketplace-gxkwt\" (UID: \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\") " pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.675712 4932 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-fw6z2\" (UniqueName: \"kubernetes.io/projected/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-kube-api-access-fw6z2\") pod \"redhat-marketplace-gxkwt\" (UID: \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\") " pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:39 crc kubenswrapper[4932]: I1125 09:29:39.703773 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:40 crc kubenswrapper[4932]: I1125 09:29:40.130311 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gxkwt"] Nov 25 09:29:40 crc kubenswrapper[4932]: I1125 09:29:40.344267 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gxkwt" event={"ID":"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052","Type":"ContainerStarted","Data":"85c393e92e0b074729ef7c2038fa21d2b741ffd278cea72bddc298b082db6aa6"} Nov 25 09:29:41 crc kubenswrapper[4932]: I1125 09:29:41.355092 4932 generic.go:334] "Generic (PLEG): container finished" podID="9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" containerID="17b065dabc2acbc6a1ff19d820817b45fe70e4bffb013f3ce2e0b8623e66d087" exitCode=0 Nov 25 09:29:41 crc kubenswrapper[4932]: I1125 09:29:41.355240 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gxkwt" event={"ID":"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052","Type":"ContainerDied","Data":"17b065dabc2acbc6a1ff19d820817b45fe70e4bffb013f3ce2e0b8623e66d087"} Nov 25 09:29:41 crc kubenswrapper[4932]: I1125 09:29:41.356852 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:29:44 crc kubenswrapper[4932]: I1125 09:29:44.394733 4932 generic.go:334] "Generic (PLEG): container finished" podID="9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" containerID="af1ed16563ed2adb61bcd7d60233ddda8885581575dadf990a3e177a13da19c3" exitCode=0 Nov 25 09:29:44 crc kubenswrapper[4932]: I1125 09:29:44.395061 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gxkwt" event={"ID":"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052","Type":"ContainerDied","Data":"af1ed16563ed2adb61bcd7d60233ddda8885581575dadf990a3e177a13da19c3"} Nov 25 09:29:44 crc kubenswrapper[4932]: I1125 09:29:44.606143 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:29:44 crc kubenswrapper[4932]: E1125 09:29:44.606592 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:29:45 crc kubenswrapper[4932]: I1125 09:29:45.977338 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9sxsh"] Nov 25 09:29:45 crc kubenswrapper[4932]: I1125 09:29:45.979500 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:29:46 crc kubenswrapper[4932]: I1125 09:29:46.002719 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9sxsh"] Nov 25 09:29:46 crc kubenswrapper[4932]: I1125 09:29:46.161793 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5hvm\" (UniqueName: \"kubernetes.io/projected/a5efbbca-c276-4e6b-add7-df3a25f056ca-kube-api-access-s5hvm\") pod \"certified-operators-9sxsh\" (UID: \"a5efbbca-c276-4e6b-add7-df3a25f056ca\") " pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:29:46 crc kubenswrapper[4932]: I1125 09:29:46.161914 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5efbbca-c276-4e6b-add7-df3a25f056ca-utilities\") pod \"certified-operators-9sxsh\" (UID: \"a5efbbca-c276-4e6b-add7-df3a25f056ca\") " pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:29:46 crc kubenswrapper[4932]: I1125 09:29:46.161949 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5efbbca-c276-4e6b-add7-df3a25f056ca-catalog-content\") pod \"certified-operators-9sxsh\" (UID: \"a5efbbca-c276-4e6b-add7-df3a25f056ca\") " pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:29:46 crc kubenswrapper[4932]: I1125 09:29:46.263215 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5hvm\" (UniqueName: \"kubernetes.io/projected/a5efbbca-c276-4e6b-add7-df3a25f056ca-kube-api-access-s5hvm\") pod \"certified-operators-9sxsh\" (UID: \"a5efbbca-c276-4e6b-add7-df3a25f056ca\") " pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:29:46 crc kubenswrapper[4932]: I1125 09:29:46.263310 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5efbbca-c276-4e6b-add7-df3a25f056ca-utilities\") pod \"certified-operators-9sxsh\" (UID: \"a5efbbca-c276-4e6b-add7-df3a25f056ca\") " pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:29:46 crc kubenswrapper[4932]: I1125 09:29:46.263336 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5efbbca-c276-4e6b-add7-df3a25f056ca-catalog-content\") pod \"certified-operators-9sxsh\" (UID: \"a5efbbca-c276-4e6b-add7-df3a25f056ca\") " pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:29:46 crc kubenswrapper[4932]: I1125 09:29:46.263975 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5efbbca-c276-4e6b-add7-df3a25f056ca-catalog-content\") pod \"certified-operators-9sxsh\" (UID: \"a5efbbca-c276-4e6b-add7-df3a25f056ca\") " pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:29:46 crc kubenswrapper[4932]: I1125 09:29:46.264069 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5efbbca-c276-4e6b-add7-df3a25f056ca-utilities\") pod \"certified-operators-9sxsh\" (UID: \"a5efbbca-c276-4e6b-add7-df3a25f056ca\") " pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:29:46 crc kubenswrapper[4932]: I1125 09:29:46.288636 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-s5hvm\" (UniqueName: \"kubernetes.io/projected/a5efbbca-c276-4e6b-add7-df3a25f056ca-kube-api-access-s5hvm\") pod \"certified-operators-9sxsh\" (UID: \"a5efbbca-c276-4e6b-add7-df3a25f056ca\") " pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:29:46 crc kubenswrapper[4932]: I1125 09:29:46.345440 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:29:46 crc kubenswrapper[4932]: I1125 09:29:46.808684 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9sxsh"] Nov 25 09:29:46 crc kubenswrapper[4932]: W1125 09:29:46.827456 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda5efbbca_c276_4e6b_add7_df3a25f056ca.slice/crio-74cbaae09f80d59fc9bb49cf8bc4007bd6807c36b956e6395c58f383b4bbaf90 WatchSource:0}: Error finding container 74cbaae09f80d59fc9bb49cf8bc4007bd6807c36b956e6395c58f383b4bbaf90: Status 404 returned error can't find the container with id 74cbaae09f80d59fc9bb49cf8bc4007bd6807c36b956e6395c58f383b4bbaf90 Nov 25 09:29:47 crc kubenswrapper[4932]: I1125 09:29:47.417839 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9sxsh" event={"ID":"a5efbbca-c276-4e6b-add7-df3a25f056ca","Type":"ContainerStarted","Data":"74cbaae09f80d59fc9bb49cf8bc4007bd6807c36b956e6395c58f383b4bbaf90"} Nov 25 09:29:48 crc kubenswrapper[4932]: I1125 09:29:48.425820 4932 generic.go:334] "Generic (PLEG): container finished" podID="a5efbbca-c276-4e6b-add7-df3a25f056ca" containerID="63aeed7246ba6ec29dbbfbc120d5de03d23f0f60e4921a6be9a95f14d29da736" exitCode=0 Nov 25 09:29:48 crc kubenswrapper[4932]: I1125 09:29:48.425864 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9sxsh" event={"ID":"a5efbbca-c276-4e6b-add7-df3a25f056ca","Type":"ContainerDied","Data":"63aeed7246ba6ec29dbbfbc120d5de03d23f0f60e4921a6be9a95f14d29da736"} Nov 25 09:29:48 crc kubenswrapper[4932]: I1125 09:29:48.429223 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gxkwt" event={"ID":"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052","Type":"ContainerStarted","Data":"70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993"} Nov 25 09:29:48 crc kubenswrapper[4932]: I1125 09:29:48.462967 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gxkwt" podStartSLOduration=3.331834986 podStartE2EDuration="9.462947507s" podCreationTimestamp="2025-11-25 09:29:39 +0000 UTC" firstStartedPulling="2025-11-25 09:29:41.35664045 +0000 UTC m=+2441.482670013" lastFinishedPulling="2025-11-25 09:29:47.487752971 +0000 UTC m=+2447.613782534" observedRunningTime="2025-11-25 09:29:48.462132344 +0000 UTC m=+2448.588161937" watchObservedRunningTime="2025-11-25 09:29:48.462947507 +0000 UTC m=+2448.588977070" Nov 25 09:29:49 crc kubenswrapper[4932]: I1125 09:29:49.704340 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:49 crc kubenswrapper[4932]: I1125 09:29:49.704419 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:49 crc kubenswrapper[4932]: I1125 09:29:49.770825 4932 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:54 crc kubenswrapper[4932]: I1125 09:29:54.479974 4932 generic.go:334] "Generic (PLEG): container finished" podID="a5efbbca-c276-4e6b-add7-df3a25f056ca" containerID="f3b5b96f9d6f4714df4a0015a84e2d5a159845c0267fec22875d9a724e5decd5" exitCode=0 Nov 25 09:29:54 crc kubenswrapper[4932]: I1125 09:29:54.480084 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9sxsh" event={"ID":"a5efbbca-c276-4e6b-add7-df3a25f056ca","Type":"ContainerDied","Data":"f3b5b96f9d6f4714df4a0015a84e2d5a159845c0267fec22875d9a724e5decd5"} Nov 25 09:29:57 crc kubenswrapper[4932]: I1125 09:29:57.500526 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9sxsh" event={"ID":"a5efbbca-c276-4e6b-add7-df3a25f056ca","Type":"ContainerStarted","Data":"a0098caca51e5c588a357a8d7f2c0b36456f4813847985177d154db20c19acee"} Nov 25 09:29:57 crc kubenswrapper[4932]: I1125 09:29:57.521938 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9sxsh" podStartSLOduration=4.192276719 podStartE2EDuration="12.521919052s" podCreationTimestamp="2025-11-25 09:29:45 +0000 UTC" firstStartedPulling="2025-11-25 09:29:48.42893852 +0000 UTC m=+2448.554968083" lastFinishedPulling="2025-11-25 09:29:56.758580853 +0000 UTC m=+2456.884610416" observedRunningTime="2025-11-25 09:29:57.517362661 +0000 UTC m=+2457.643392244" watchObservedRunningTime="2025-11-25 09:29:57.521919052 +0000 UTC m=+2457.647948615" Nov 25 09:29:59 crc kubenswrapper[4932]: I1125 09:29:59.606528 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:29:59 crc kubenswrapper[4932]: E1125 09:29:59.606878 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:29:59 crc kubenswrapper[4932]: I1125 09:29:59.748329 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:29:59 crc kubenswrapper[4932]: I1125 09:29:59.791318 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gxkwt"] Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.154110 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d"] Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.155688 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.158282 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.158344 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.164480 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d"] Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.272128 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a398f8a1-0132-4daf-b96b-885c2c15bcfa-secret-volume\") pod \"collect-profiles-29401050-4n64d\" (UID: \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.272304 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a398f8a1-0132-4daf-b96b-885c2c15bcfa-config-volume\") pod \"collect-profiles-29401050-4n64d\" (UID: \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.272466 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-458t5\" (UniqueName: \"kubernetes.io/projected/a398f8a1-0132-4daf-b96b-885c2c15bcfa-kube-api-access-458t5\") pod \"collect-profiles-29401050-4n64d\" (UID: \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.374162 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a398f8a1-0132-4daf-b96b-885c2c15bcfa-config-volume\") pod \"collect-profiles-29401050-4n64d\" (UID: \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.374260 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-458t5\" (UniqueName: \"kubernetes.io/projected/a398f8a1-0132-4daf-b96b-885c2c15bcfa-kube-api-access-458t5\") pod \"collect-profiles-29401050-4n64d\" (UID: \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.374297 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a398f8a1-0132-4daf-b96b-885c2c15bcfa-secret-volume\") pod \"collect-profiles-29401050-4n64d\" (UID: \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.375018 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a398f8a1-0132-4daf-b96b-885c2c15bcfa-config-volume\") pod 
\"collect-profiles-29401050-4n64d\" (UID: \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.380628 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a398f8a1-0132-4daf-b96b-885c2c15bcfa-secret-volume\") pod \"collect-profiles-29401050-4n64d\" (UID: \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.390802 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-458t5\" (UniqueName: \"kubernetes.io/projected/a398f8a1-0132-4daf-b96b-885c2c15bcfa-kube-api-access-458t5\") pod \"collect-profiles-29401050-4n64d\" (UID: \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.475830 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.539556 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gxkwt" podUID="9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" containerName="registry-server" containerID="cri-o://70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993" gracePeriod=2 Nov 25 09:30:00 crc kubenswrapper[4932]: I1125 09:30:00.955701 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d"] Nov 25 09:30:00 crc kubenswrapper[4932]: W1125 09:30:00.960054 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda398f8a1_0132_4daf_b96b_885c2c15bcfa.slice/crio-eaf8f39e65ad1250dc2746f709031bf66ee1a530f3c08638ab62c689fabdbd34 WatchSource:0}: Error finding container eaf8f39e65ad1250dc2746f709031bf66ee1a530f3c08638ab62c689fabdbd34: Status 404 returned error can't find the container with id eaf8f39e65ad1250dc2746f709031bf66ee1a530f3c08638ab62c689fabdbd34 Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.479171 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.547013 4932 generic.go:334] "Generic (PLEG): container finished" podID="9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" containerID="70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993" exitCode=0 Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.547092 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gxkwt" event={"ID":"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052","Type":"ContainerDied","Data":"70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993"} Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.547119 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gxkwt" event={"ID":"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052","Type":"ContainerDied","Data":"85c393e92e0b074729ef7c2038fa21d2b741ffd278cea72bddc298b082db6aa6"} Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.547137 4932 scope.go:117] "RemoveContainer" containerID="70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.547356 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gxkwt" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.549535 4932 generic.go:334] "Generic (PLEG): container finished" podID="a398f8a1-0132-4daf-b96b-885c2c15bcfa" containerID="cf61ff54fe204ad9b467ac67a8bcb270edcb42989d6d6f38225cd11a86530e32" exitCode=0 Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.549629 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" event={"ID":"a398f8a1-0132-4daf-b96b-885c2c15bcfa","Type":"ContainerDied","Data":"cf61ff54fe204ad9b467ac67a8bcb270edcb42989d6d6f38225cd11a86530e32"} Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.549659 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" event={"ID":"a398f8a1-0132-4daf-b96b-885c2c15bcfa","Type":"ContainerStarted","Data":"eaf8f39e65ad1250dc2746f709031bf66ee1a530f3c08638ab62c689fabdbd34"} Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.571978 4932 scope.go:117] "RemoveContainer" containerID="af1ed16563ed2adb61bcd7d60233ddda8885581575dadf990a3e177a13da19c3" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.591667 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fw6z2\" (UniqueName: \"kubernetes.io/projected/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-kube-api-access-fw6z2\") pod \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\" (UID: \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\") " Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.591722 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-catalog-content\") pod \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\" (UID: \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\") " Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.591762 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-utilities\") pod \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\" (UID: \"9e38afc1-5f0c-4066-b6b7-8cbba7f5e052\") " Nov 25 
09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.592865 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-utilities" (OuterVolumeSpecName: "utilities") pod "9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" (UID: "9e38afc1-5f0c-4066-b6b7-8cbba7f5e052"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.599345 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-kube-api-access-fw6z2" (OuterVolumeSpecName: "kube-api-access-fw6z2") pod "9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" (UID: "9e38afc1-5f0c-4066-b6b7-8cbba7f5e052"). InnerVolumeSpecName "kube-api-access-fw6z2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.610393 4932 scope.go:117] "RemoveContainer" containerID="17b065dabc2acbc6a1ff19d820817b45fe70e4bffb013f3ce2e0b8623e66d087" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.621231 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" (UID: "9e38afc1-5f0c-4066-b6b7-8cbba7f5e052"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.655128 4932 scope.go:117] "RemoveContainer" containerID="70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993" Nov 25 09:30:01 crc kubenswrapper[4932]: E1125 09:30:01.657545 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993\": container with ID starting with 70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993 not found: ID does not exist" containerID="70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.657581 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993"} err="failed to get container status \"70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993\": rpc error: code = NotFound desc = could not find container \"70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993\": container with ID starting with 70baa798e48bd115da70100b1722118f758f8337f2f1e0ed77d91c14d0dd8993 not found: ID does not exist" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.657603 4932 scope.go:117] "RemoveContainer" containerID="af1ed16563ed2adb61bcd7d60233ddda8885581575dadf990a3e177a13da19c3" Nov 25 09:30:01 crc kubenswrapper[4932]: E1125 09:30:01.667597 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af1ed16563ed2adb61bcd7d60233ddda8885581575dadf990a3e177a13da19c3\": container with ID starting with af1ed16563ed2adb61bcd7d60233ddda8885581575dadf990a3e177a13da19c3 not found: ID does not exist" containerID="af1ed16563ed2adb61bcd7d60233ddda8885581575dadf990a3e177a13da19c3" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.667652 4932 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"af1ed16563ed2adb61bcd7d60233ddda8885581575dadf990a3e177a13da19c3"} err="failed to get container status \"af1ed16563ed2adb61bcd7d60233ddda8885581575dadf990a3e177a13da19c3\": rpc error: code = NotFound desc = could not find container \"af1ed16563ed2adb61bcd7d60233ddda8885581575dadf990a3e177a13da19c3\": container with ID starting with af1ed16563ed2adb61bcd7d60233ddda8885581575dadf990a3e177a13da19c3 not found: ID does not exist" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.667687 4932 scope.go:117] "RemoveContainer" containerID="17b065dabc2acbc6a1ff19d820817b45fe70e4bffb013f3ce2e0b8623e66d087" Nov 25 09:30:01 crc kubenswrapper[4932]: E1125 09:30:01.678728 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17b065dabc2acbc6a1ff19d820817b45fe70e4bffb013f3ce2e0b8623e66d087\": container with ID starting with 17b065dabc2acbc6a1ff19d820817b45fe70e4bffb013f3ce2e0b8623e66d087 not found: ID does not exist" containerID="17b065dabc2acbc6a1ff19d820817b45fe70e4bffb013f3ce2e0b8623e66d087" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.678784 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17b065dabc2acbc6a1ff19d820817b45fe70e4bffb013f3ce2e0b8623e66d087"} err="failed to get container status \"17b065dabc2acbc6a1ff19d820817b45fe70e4bffb013f3ce2e0b8623e66d087\": rpc error: code = NotFound desc = could not find container \"17b065dabc2acbc6a1ff19d820817b45fe70e4bffb013f3ce2e0b8623e66d087\": container with ID starting with 17b065dabc2acbc6a1ff19d820817b45fe70e4bffb013f3ce2e0b8623e66d087 not found: ID does not exist" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.695997 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fw6z2\" (UniqueName: \"kubernetes.io/projected/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-kube-api-access-fw6z2\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.696047 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.696059 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.881834 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gxkwt"] Nov 25 09:30:01 crc kubenswrapper[4932]: I1125 09:30:01.887967 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gxkwt"] Nov 25 09:30:02 crc kubenswrapper[4932]: I1125 09:30:02.615130 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" path="/var/lib/kubelet/pods/9e38afc1-5f0c-4066-b6b7-8cbba7f5e052/volumes" Nov 25 09:30:02 crc kubenswrapper[4932]: I1125 09:30:02.812104 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:02 crc kubenswrapper[4932]: I1125 09:30:02.911834 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-458t5\" (UniqueName: \"kubernetes.io/projected/a398f8a1-0132-4daf-b96b-885c2c15bcfa-kube-api-access-458t5\") pod \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\" (UID: \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\") " Nov 25 09:30:02 crc kubenswrapper[4932]: I1125 09:30:02.911938 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a398f8a1-0132-4daf-b96b-885c2c15bcfa-config-volume\") pod \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\" (UID: \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\") " Nov 25 09:30:02 crc kubenswrapper[4932]: I1125 09:30:02.912008 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a398f8a1-0132-4daf-b96b-885c2c15bcfa-secret-volume\") pod \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\" (UID: \"a398f8a1-0132-4daf-b96b-885c2c15bcfa\") " Nov 25 09:30:02 crc kubenswrapper[4932]: I1125 09:30:02.914750 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a398f8a1-0132-4daf-b96b-885c2c15bcfa-config-volume" (OuterVolumeSpecName: "config-volume") pod "a398f8a1-0132-4daf-b96b-885c2c15bcfa" (UID: "a398f8a1-0132-4daf-b96b-885c2c15bcfa"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:30:02 crc kubenswrapper[4932]: I1125 09:30:02.916846 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a398f8a1-0132-4daf-b96b-885c2c15bcfa-kube-api-access-458t5" (OuterVolumeSpecName: "kube-api-access-458t5") pod "a398f8a1-0132-4daf-b96b-885c2c15bcfa" (UID: "a398f8a1-0132-4daf-b96b-885c2c15bcfa"). InnerVolumeSpecName "kube-api-access-458t5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:30:02 crc kubenswrapper[4932]: I1125 09:30:02.917011 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a398f8a1-0132-4daf-b96b-885c2c15bcfa-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a398f8a1-0132-4daf-b96b-885c2c15bcfa" (UID: "a398f8a1-0132-4daf-b96b-885c2c15bcfa"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:30:03 crc kubenswrapper[4932]: I1125 09:30:03.014456 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-458t5\" (UniqueName: \"kubernetes.io/projected/a398f8a1-0132-4daf-b96b-885c2c15bcfa-kube-api-access-458t5\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:03 crc kubenswrapper[4932]: I1125 09:30:03.014843 4932 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a398f8a1-0132-4daf-b96b-885c2c15bcfa-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:03 crc kubenswrapper[4932]: I1125 09:30:03.014863 4932 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a398f8a1-0132-4daf-b96b-885c2c15bcfa-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:03 crc kubenswrapper[4932]: I1125 09:30:03.566641 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" event={"ID":"a398f8a1-0132-4daf-b96b-885c2c15bcfa","Type":"ContainerDied","Data":"eaf8f39e65ad1250dc2746f709031bf66ee1a530f3c08638ab62c689fabdbd34"} Nov 25 09:30:03 crc kubenswrapper[4932]: I1125 09:30:03.566689 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eaf8f39e65ad1250dc2746f709031bf66ee1a530f3c08638ab62c689fabdbd34" Nov 25 09:30:03 crc kubenswrapper[4932]: I1125 09:30:03.566691 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d" Nov 25 09:30:03 crc kubenswrapper[4932]: I1125 09:30:03.875992 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj"] Nov 25 09:30:03 crc kubenswrapper[4932]: I1125 09:30:03.880136 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401005-fjmdj"] Nov 25 09:30:04 crc kubenswrapper[4932]: I1125 09:30:04.634759 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca7ad64f-5d34-4269-9faf-46bc2e3cab93" path="/var/lib/kubelet/pods/ca7ad64f-5d34-4269-9faf-46bc2e3cab93/volumes" Nov 25 09:30:06 crc kubenswrapper[4932]: I1125 09:30:06.345756 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:30:06 crc kubenswrapper[4932]: I1125 09:30:06.345800 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:30:06 crc kubenswrapper[4932]: I1125 09:30:06.397880 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:30:06 crc kubenswrapper[4932]: I1125 09:30:06.665616 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:30:06 crc kubenswrapper[4932]: I1125 09:30:06.747235 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9sxsh"] Nov 25 09:30:07 crc kubenswrapper[4932]: I1125 09:30:07.664637 4932 scope.go:117] "RemoveContainer" containerID="68d7c8e43ca36fc11bb0e1fb85e0b806bafbc454fe81ee3a899a6d5d66068264" Nov 25 09:30:08 crc kubenswrapper[4932]: I1125 09:30:08.599845 4932 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-9sxsh" podUID="a5efbbca-c276-4e6b-add7-df3a25f056ca" containerName="registry-server" containerID="cri-o://a0098caca51e5c588a357a8d7f2c0b36456f4813847985177d154db20c19acee" gracePeriod=2 Nov 25 09:30:09 crc kubenswrapper[4932]: I1125 09:30:09.610179 4932 generic.go:334] "Generic (PLEG): container finished" podID="a5efbbca-c276-4e6b-add7-df3a25f056ca" containerID="a0098caca51e5c588a357a8d7f2c0b36456f4813847985177d154db20c19acee" exitCode=0 Nov 25 09:30:09 crc kubenswrapper[4932]: I1125 09:30:09.610315 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9sxsh" event={"ID":"a5efbbca-c276-4e6b-add7-df3a25f056ca","Type":"ContainerDied","Data":"a0098caca51e5c588a357a8d7f2c0b36456f4813847985177d154db20c19acee"} Nov 25 09:30:09 crc kubenswrapper[4932]: I1125 09:30:09.674333 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:30:09 crc kubenswrapper[4932]: I1125 09:30:09.838440 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5hvm\" (UniqueName: \"kubernetes.io/projected/a5efbbca-c276-4e6b-add7-df3a25f056ca-kube-api-access-s5hvm\") pod \"a5efbbca-c276-4e6b-add7-df3a25f056ca\" (UID: \"a5efbbca-c276-4e6b-add7-df3a25f056ca\") " Nov 25 09:30:09 crc kubenswrapper[4932]: I1125 09:30:09.838571 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5efbbca-c276-4e6b-add7-df3a25f056ca-catalog-content\") pod \"a5efbbca-c276-4e6b-add7-df3a25f056ca\" (UID: \"a5efbbca-c276-4e6b-add7-df3a25f056ca\") " Nov 25 09:30:09 crc kubenswrapper[4932]: I1125 09:30:09.838674 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5efbbca-c276-4e6b-add7-df3a25f056ca-utilities\") pod \"a5efbbca-c276-4e6b-add7-df3a25f056ca\" (UID: \"a5efbbca-c276-4e6b-add7-df3a25f056ca\") " Nov 25 09:30:09 crc kubenswrapper[4932]: I1125 09:30:09.840635 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5efbbca-c276-4e6b-add7-df3a25f056ca-utilities" (OuterVolumeSpecName: "utilities") pod "a5efbbca-c276-4e6b-add7-df3a25f056ca" (UID: "a5efbbca-c276-4e6b-add7-df3a25f056ca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:30:09 crc kubenswrapper[4932]: I1125 09:30:09.843372 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5efbbca-c276-4e6b-add7-df3a25f056ca-kube-api-access-s5hvm" (OuterVolumeSpecName: "kube-api-access-s5hvm") pod "a5efbbca-c276-4e6b-add7-df3a25f056ca" (UID: "a5efbbca-c276-4e6b-add7-df3a25f056ca"). InnerVolumeSpecName "kube-api-access-s5hvm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:30:09 crc kubenswrapper[4932]: I1125 09:30:09.891403 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5efbbca-c276-4e6b-add7-df3a25f056ca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a5efbbca-c276-4e6b-add7-df3a25f056ca" (UID: "a5efbbca-c276-4e6b-add7-df3a25f056ca"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:30:09 crc kubenswrapper[4932]: I1125 09:30:09.940478 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5hvm\" (UniqueName: \"kubernetes.io/projected/a5efbbca-c276-4e6b-add7-df3a25f056ca-kube-api-access-s5hvm\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:09 crc kubenswrapper[4932]: I1125 09:30:09.940509 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5efbbca-c276-4e6b-add7-df3a25f056ca-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:09 crc kubenswrapper[4932]: I1125 09:30:09.940519 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5efbbca-c276-4e6b-add7-df3a25f056ca-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:30:10 crc kubenswrapper[4932]: I1125 09:30:10.617697 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9sxsh" event={"ID":"a5efbbca-c276-4e6b-add7-df3a25f056ca","Type":"ContainerDied","Data":"74cbaae09f80d59fc9bb49cf8bc4007bd6807c36b956e6395c58f383b4bbaf90"} Nov 25 09:30:10 crc kubenswrapper[4932]: I1125 09:30:10.617760 4932 scope.go:117] "RemoveContainer" containerID="a0098caca51e5c588a357a8d7f2c0b36456f4813847985177d154db20c19acee" Nov 25 09:30:10 crc kubenswrapper[4932]: I1125 09:30:10.617758 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9sxsh" Nov 25 09:30:10 crc kubenswrapper[4932]: I1125 09:30:10.639599 4932 scope.go:117] "RemoveContainer" containerID="f3b5b96f9d6f4714df4a0015a84e2d5a159845c0267fec22875d9a724e5decd5" Nov 25 09:30:10 crc kubenswrapper[4932]: I1125 09:30:10.664634 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9sxsh"] Nov 25 09:30:10 crc kubenswrapper[4932]: I1125 09:30:10.670535 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9sxsh"] Nov 25 09:30:10 crc kubenswrapper[4932]: I1125 09:30:10.677566 4932 scope.go:117] "RemoveContainer" containerID="63aeed7246ba6ec29dbbfbc120d5de03d23f0f60e4921a6be9a95f14d29da736" Nov 25 09:30:11 crc kubenswrapper[4932]: I1125 09:30:11.606117 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:30:11 crc kubenswrapper[4932]: E1125 09:30:11.606437 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:30:12 crc kubenswrapper[4932]: I1125 09:30:12.617486 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5efbbca-c276-4e6b-add7-df3a25f056ca" path="/var/lib/kubelet/pods/a5efbbca-c276-4e6b-add7-df3a25f056ca/volumes" Nov 25 09:30:25 crc kubenswrapper[4932]: I1125 09:30:25.606074 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13" Nov 25 09:30:25 crc kubenswrapper[4932]: E1125 09:30:25.606902 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:30:37 crc kubenswrapper[4932]: I1125 09:30:37.606041 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13"
Nov 25 09:30:37 crc kubenswrapper[4932]: E1125 09:30:37.607617 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:30:48 crc kubenswrapper[4932]: I1125 09:30:48.606936 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13"
Nov 25 09:30:48 crc kubenswrapper[4932]: E1125 09:30:48.610890 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:31:03 crc kubenswrapper[4932]: I1125 09:31:03.605418 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13"
Nov 25 09:31:03 crc kubenswrapper[4932]: E1125 09:31:03.606099 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:31:16 crc kubenswrapper[4932]: I1125 09:31:16.606424 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13"
Nov 25 09:31:17 crc kubenswrapper[4932]: I1125 09:31:17.185139 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"b232272031f121887dae9383ab3d82d3d2650993a9e7a8117b3d753c9a2df2c9"}
Nov 25 09:33:37 crc kubenswrapper[4932]: I1125 09:33:37.180857 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:33:37 crc kubenswrapper[4932]: I1125 09:33:37.181546 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:34:07 crc kubenswrapper[4932]: I1125 09:34:07.180927 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:34:07 crc kubenswrapper[4932]: I1125 09:34:07.181616 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:34:37 crc kubenswrapper[4932]: I1125 09:34:37.180774 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:34:37 crc kubenswrapper[4932]: I1125 09:34:37.181326 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:34:37 crc kubenswrapper[4932]: I1125 09:34:37.181386 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh"
Nov 25 09:34:37 crc kubenswrapper[4932]: I1125 09:34:37.182082 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b232272031f121887dae9383ab3d82d3d2650993a9e7a8117b3d753c9a2df2c9"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 09:34:37 crc kubenswrapper[4932]: I1125 09:34:37.182153 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://b232272031f121887dae9383ab3d82d3d2650993a9e7a8117b3d753c9a2df2c9" gracePeriod=600
Nov 25 09:34:37 crc kubenswrapper[4932]: I1125 09:34:37.785086 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="b232272031f121887dae9383ab3d82d3d2650993a9e7a8117b3d753c9a2df2c9" exitCode=0
Nov 25 09:34:37 crc kubenswrapper[4932]: I1125 09:34:37.785152 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"b232272031f121887dae9383ab3d82d3d2650993a9e7a8117b3d753c9a2df2c9"}
Nov 25 09:34:37 crc kubenswrapper[4932]: I1125 09:34:37.785480 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7"}
Nov 25 09:34:37 crc kubenswrapper[4932]: I1125 09:34:37.785506 4932 scope.go:117] "RemoveContainer" containerID="cd377af45a72bc13b5e42c3cf9229a271dfe5aaec48009fa256a957d57731e13"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.865117 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2s6v8"]
Nov 25 09:35:16 crc kubenswrapper[4932]: E1125 09:35:16.867270 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" containerName="registry-server"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.867290 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" containerName="registry-server"
Nov 25 09:35:16 crc kubenswrapper[4932]: E1125 09:35:16.867309 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" containerName="extract-content"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.867317 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" containerName="extract-content"
Nov 25 09:35:16 crc kubenswrapper[4932]: E1125 09:35:16.867358 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" containerName="extract-utilities"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.867370 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" containerName="extract-utilities"
Nov 25 09:35:16 crc kubenswrapper[4932]: E1125 09:35:16.867395 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5efbbca-c276-4e6b-add7-df3a25f056ca" containerName="registry-server"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.867403 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5efbbca-c276-4e6b-add7-df3a25f056ca" containerName="registry-server"
Nov 25 09:35:16 crc kubenswrapper[4932]: E1125 09:35:16.867441 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5efbbca-c276-4e6b-add7-df3a25f056ca" containerName="extract-utilities"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.867450 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5efbbca-c276-4e6b-add7-df3a25f056ca" containerName="extract-utilities"
Nov 25 09:35:16 crc kubenswrapper[4932]: E1125 09:35:16.867461 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a398f8a1-0132-4daf-b96b-885c2c15bcfa" containerName="collect-profiles"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.867468 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a398f8a1-0132-4daf-b96b-885c2c15bcfa" containerName="collect-profiles"
Nov 25 09:35:16 crc kubenswrapper[4932]: E1125 09:35:16.867482 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5efbbca-c276-4e6b-add7-df3a25f056ca" containerName="extract-content"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.867489 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5efbbca-c276-4e6b-add7-df3a25f056ca" containerName="extract-content"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.867706 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a398f8a1-0132-4daf-b96b-885c2c15bcfa" containerName="collect-profiles"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.867728 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5efbbca-c276-4e6b-add7-df3a25f056ca" containerName="registry-server"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.867766 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e38afc1-5f0c-4066-b6b7-8cbba7f5e052" containerName="registry-server"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.869316 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2s6v8"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.914404 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2s6v8"]
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.947584 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-catalog-content\") pod \"redhat-operators-2s6v8\" (UID: \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\") " pod="openshift-marketplace/redhat-operators-2s6v8"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.947696 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-utilities\") pod \"redhat-operators-2s6v8\" (UID: \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\") " pod="openshift-marketplace/redhat-operators-2s6v8"
Nov 25 09:35:16 crc kubenswrapper[4932]: I1125 09:35:16.947727 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69slv\" (UniqueName: \"kubernetes.io/projected/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-kube-api-access-69slv\") pod \"redhat-operators-2s6v8\" (UID: \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\") " pod="openshift-marketplace/redhat-operators-2s6v8"
Nov 25 09:35:17 crc kubenswrapper[4932]: I1125 09:35:17.048815 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-catalog-content\") pod \"redhat-operators-2s6v8\" (UID: \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\") " pod="openshift-marketplace/redhat-operators-2s6v8"
Nov 25 09:35:17 crc kubenswrapper[4932]: I1125 09:35:17.048922 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-utilities\") pod \"redhat-operators-2s6v8\" (UID: \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\") " pod="openshift-marketplace/redhat-operators-2s6v8"
Nov 25 09:35:17 crc kubenswrapper[4932]: I1125 09:35:17.048950 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69slv\" (UniqueName: \"kubernetes.io/projected/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-kube-api-access-69slv\") pod \"redhat-operators-2s6v8\" (UID: \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\") " pod="openshift-marketplace/redhat-operators-2s6v8"
Nov 25 09:35:17 crc kubenswrapper[4932]: I1125 09:35:17.049407 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-catalog-content\") pod \"redhat-operators-2s6v8\" (UID: \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\") " pod="openshift-marketplace/redhat-operators-2s6v8"
Nov 25 09:35:17 crc kubenswrapper[4932]: I1125 09:35:17.049443 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-utilities\") pod
\"redhat-operators-2s6v8\" (UID: \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\") " pod="openshift-marketplace/redhat-operators-2s6v8" Nov 25 09:35:17 crc kubenswrapper[4932]: I1125 09:35:17.072215 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69slv\" (UniqueName: \"kubernetes.io/projected/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-kube-api-access-69slv\") pod \"redhat-operators-2s6v8\" (UID: \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\") " pod="openshift-marketplace/redhat-operators-2s6v8" Nov 25 09:35:17 crc kubenswrapper[4932]: I1125 09:35:17.201616 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2s6v8" Nov 25 09:35:17 crc kubenswrapper[4932]: I1125 09:35:17.647510 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2s6v8"] Nov 25 09:35:18 crc kubenswrapper[4932]: I1125 09:35:18.109076 4932 generic.go:334] "Generic (PLEG): container finished" podID="c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" containerID="7e68f646608d11c00dae7b175ae5c155dc53a8997aec12c9a1ad948be606329e" exitCode=0 Nov 25 09:35:18 crc kubenswrapper[4932]: I1125 09:35:18.109173 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2s6v8" event={"ID":"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f","Type":"ContainerDied","Data":"7e68f646608d11c00dae7b175ae5c155dc53a8997aec12c9a1ad948be606329e"} Nov 25 09:35:18 crc kubenswrapper[4932]: I1125 09:35:18.109378 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2s6v8" event={"ID":"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f","Type":"ContainerStarted","Data":"f5156b6e35cf313572e67bd2fbfa7186bef906a551821bb7ee113683cce01a1d"} Nov 25 09:35:18 crc kubenswrapper[4932]: I1125 09:35:18.110743 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:35:19 crc kubenswrapper[4932]: I1125 09:35:19.122141 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2s6v8" event={"ID":"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f","Type":"ContainerStarted","Data":"281d2ac9bb4be8795fc7a67a2444b2cfc175f7b71619ac45a03341872179b341"} Nov 25 09:35:20 crc kubenswrapper[4932]: I1125 09:35:20.131915 4932 generic.go:334] "Generic (PLEG): container finished" podID="c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" containerID="281d2ac9bb4be8795fc7a67a2444b2cfc175f7b71619ac45a03341872179b341" exitCode=0 Nov 25 09:35:20 crc kubenswrapper[4932]: I1125 09:35:20.131975 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2s6v8" event={"ID":"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f","Type":"ContainerDied","Data":"281d2ac9bb4be8795fc7a67a2444b2cfc175f7b71619ac45a03341872179b341"} Nov 25 09:35:21 crc kubenswrapper[4932]: I1125 09:35:21.142561 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2s6v8" event={"ID":"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f","Type":"ContainerStarted","Data":"bdc34420eb6790ff30954d24a2104cd6b401961a5ec03e84b3ab38ff61cdb03f"} Nov 25 09:35:21 crc kubenswrapper[4932]: I1125 09:35:21.163320 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2s6v8" podStartSLOduration=2.515372989 podStartE2EDuration="5.163290464s" podCreationTimestamp="2025-11-25 09:35:16 +0000 UTC" firstStartedPulling="2025-11-25 09:35:18.110498852 +0000 UTC m=+2778.236528415" 
lastFinishedPulling="2025-11-25 09:35:20.758416327 +0000 UTC m=+2780.884445890" observedRunningTime="2025-11-25 09:35:21.158720726 +0000 UTC m=+2781.284750309" watchObservedRunningTime="2025-11-25 09:35:21.163290464 +0000 UTC m=+2781.289320047" Nov 25 09:35:27 crc kubenswrapper[4932]: I1125 09:35:27.202919 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2s6v8" Nov 25 09:35:27 crc kubenswrapper[4932]: I1125 09:35:27.203752 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2s6v8" Nov 25 09:35:27 crc kubenswrapper[4932]: I1125 09:35:27.257925 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2s6v8" Nov 25 09:35:28 crc kubenswrapper[4932]: I1125 09:35:28.236941 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2s6v8" Nov 25 09:35:29 crc kubenswrapper[4932]: I1125 09:35:29.496662 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2s6v8"] Nov 25 09:35:30 crc kubenswrapper[4932]: I1125 09:35:30.206709 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2s6v8" podUID="c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" containerName="registry-server" containerID="cri-o://bdc34420eb6790ff30954d24a2104cd6b401961a5ec03e84b3ab38ff61cdb03f" gracePeriod=2 Nov 25 09:35:31 crc kubenswrapper[4932]: I1125 09:35:31.216053 4932 generic.go:334] "Generic (PLEG): container finished" podID="c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" containerID="bdc34420eb6790ff30954d24a2104cd6b401961a5ec03e84b3ab38ff61cdb03f" exitCode=0 Nov 25 09:35:31 crc kubenswrapper[4932]: I1125 09:35:31.216140 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2s6v8" event={"ID":"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f","Type":"ContainerDied","Data":"bdc34420eb6790ff30954d24a2104cd6b401961a5ec03e84b3ab38ff61cdb03f"} Nov 25 09:35:31 crc kubenswrapper[4932]: I1125 09:35:31.694336 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2s6v8" Nov 25 09:35:31 crc kubenswrapper[4932]: I1125 09:35:31.876706 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-catalog-content\") pod \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\" (UID: \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\") " Nov 25 09:35:31 crc kubenswrapper[4932]: I1125 09:35:31.876841 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69slv\" (UniqueName: \"kubernetes.io/projected/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-kube-api-access-69slv\") pod \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\" (UID: \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\") " Nov 25 09:35:31 crc kubenswrapper[4932]: I1125 09:35:31.876899 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-utilities\") pod \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\" (UID: \"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f\") " Nov 25 09:35:31 crc kubenswrapper[4932]: I1125 09:35:31.878115 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-utilities" (OuterVolumeSpecName: "utilities") pod "c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" (UID: "c223a8fc-89e8-4ab4-b7fc-191c3f0f897f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:35:31 crc kubenswrapper[4932]: I1125 09:35:31.886072 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-kube-api-access-69slv" (OuterVolumeSpecName: "kube-api-access-69slv") pod "c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" (UID: "c223a8fc-89e8-4ab4-b7fc-191c3f0f897f"). InnerVolumeSpecName "kube-api-access-69slv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:35:31 crc kubenswrapper[4932]: I1125 09:35:31.978245 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" (UID: "c223a8fc-89e8-4ab4-b7fc-191c3f0f897f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:35:31 crc kubenswrapper[4932]: I1125 09:35:31.978379 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69slv\" (UniqueName: \"kubernetes.io/projected/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-kube-api-access-69slv\") on node \"crc\" DevicePath \"\"" Nov 25 09:35:31 crc kubenswrapper[4932]: I1125 09:35:31.978402 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:35:32 crc kubenswrapper[4932]: I1125 09:35:32.080957 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:35:32 crc kubenswrapper[4932]: I1125 09:35:32.227744 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2s6v8" event={"ID":"c223a8fc-89e8-4ab4-b7fc-191c3f0f897f","Type":"ContainerDied","Data":"f5156b6e35cf313572e67bd2fbfa7186bef906a551821bb7ee113683cce01a1d"} Nov 25 09:35:32 crc kubenswrapper[4932]: I1125 09:35:32.227824 4932 scope.go:117] "RemoveContainer" containerID="bdc34420eb6790ff30954d24a2104cd6b401961a5ec03e84b3ab38ff61cdb03f" Nov 25 09:35:32 crc kubenswrapper[4932]: I1125 09:35:32.227847 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2s6v8" Nov 25 09:35:32 crc kubenswrapper[4932]: I1125 09:35:32.248734 4932 scope.go:117] "RemoveContainer" containerID="281d2ac9bb4be8795fc7a67a2444b2cfc175f7b71619ac45a03341872179b341" Nov 25 09:35:32 crc kubenswrapper[4932]: I1125 09:35:32.271348 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2s6v8"] Nov 25 09:35:32 crc kubenswrapper[4932]: I1125 09:35:32.276514 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2s6v8"] Nov 25 09:35:32 crc kubenswrapper[4932]: I1125 09:35:32.284829 4932 scope.go:117] "RemoveContainer" containerID="7e68f646608d11c00dae7b175ae5c155dc53a8997aec12c9a1ad948be606329e" Nov 25 09:35:32 crc kubenswrapper[4932]: I1125 09:35:32.616041 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" path="/var/lib/kubelet/pods/c223a8fc-89e8-4ab4-b7fc-191c3f0f897f/volumes" Nov 25 09:36:37 crc kubenswrapper[4932]: I1125 09:36:37.181246 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:36:37 crc kubenswrapper[4932]: I1125 09:36:37.181889 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:37:07 crc kubenswrapper[4932]: I1125 09:37:07.181069 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:37:07 crc kubenswrapper[4932]: I1125 09:37:07.181604 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:37:37 crc kubenswrapper[4932]: I1125 09:37:37.181448 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:37:37 crc kubenswrapper[4932]: I1125 09:37:37.182007 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:37:37 crc kubenswrapper[4932]: I1125 09:37:37.182050 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 09:37:37 crc kubenswrapper[4932]: I1125 09:37:37.182638 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:37:37 crc kubenswrapper[4932]: I1125 09:37:37.182689 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" gracePeriod=600 Nov 25 09:37:37 crc kubenswrapper[4932]: E1125 09:37:37.302144 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:37:37 crc kubenswrapper[4932]: I1125 09:37:37.833329 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" exitCode=0 Nov 25 09:37:37 crc kubenswrapper[4932]: I1125 09:37:37.833376 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7"} Nov 25 09:37:37 crc kubenswrapper[4932]: I1125 09:37:37.833422 4932 scope.go:117] "RemoveContainer" containerID="b232272031f121887dae9383ab3d82d3d2650993a9e7a8117b3d753c9a2df2c9" Nov 25 09:37:37 crc kubenswrapper[4932]: I1125 
09:37:37.833903 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:37:37 crc kubenswrapper[4932]: E1125 09:37:37.834226 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:37:52 crc kubenswrapper[4932]: I1125 09:37:52.606118 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:37:52 crc kubenswrapper[4932]: E1125 09:37:52.607561 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:37:55 crc kubenswrapper[4932]: I1125 09:37:55.863643 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pt9jc"] Nov 25 09:37:55 crc kubenswrapper[4932]: E1125 09:37:55.866669 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" containerName="extract-content" Nov 25 09:37:55 crc kubenswrapper[4932]: I1125 09:37:55.866934 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" containerName="extract-content" Nov 25 09:37:55 crc kubenswrapper[4932]: E1125 09:37:55.867161 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" containerName="extract-utilities" Nov 25 09:37:55 crc kubenswrapper[4932]: I1125 09:37:55.867438 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" containerName="extract-utilities" Nov 25 09:37:55 crc kubenswrapper[4932]: E1125 09:37:55.867589 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" containerName="registry-server" Nov 25 09:37:55 crc kubenswrapper[4932]: I1125 09:37:55.867726 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" containerName="registry-server" Nov 25 09:37:55 crc kubenswrapper[4932]: I1125 09:37:55.868254 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c223a8fc-89e8-4ab4-b7fc-191c3f0f897f" containerName="registry-server" Nov 25 09:37:55 crc kubenswrapper[4932]: I1125 09:37:55.870363 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:37:55 crc kubenswrapper[4932]: I1125 09:37:55.883667 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pt9jc"] Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.026560 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-utilities\") pod \"community-operators-pt9jc\" (UID: \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\") " pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.026620 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-catalog-content\") pod \"community-operators-pt9jc\" (UID: \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\") " pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.026739 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2ckv\" (UniqueName: \"kubernetes.io/projected/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-kube-api-access-z2ckv\") pod \"community-operators-pt9jc\" (UID: \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\") " pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.128203 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-utilities\") pod \"community-operators-pt9jc\" (UID: \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\") " pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.128537 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-catalog-content\") pod \"community-operators-pt9jc\" (UID: \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\") " pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.128635 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2ckv\" (UniqueName: \"kubernetes.io/projected/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-kube-api-access-z2ckv\") pod \"community-operators-pt9jc\" (UID: \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\") " pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.129298 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-catalog-content\") pod \"community-operators-pt9jc\" (UID: \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\") " pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.129316 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-utilities\") pod \"community-operators-pt9jc\" (UID: \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\") " pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.151534 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-z2ckv\" (UniqueName: \"kubernetes.io/projected/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-kube-api-access-z2ckv\") pod \"community-operators-pt9jc\" (UID: \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\") " pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.197274 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.677830 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pt9jc"] Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.986252 4932 generic.go:334] "Generic (PLEG): container finished" podID="af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" containerID="95b171682326c246fba98f3c30d3ec54b859f3a3d81333394c0b323a0006f178" exitCode=0 Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.986309 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pt9jc" event={"ID":"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7","Type":"ContainerDied","Data":"95b171682326c246fba98f3c30d3ec54b859f3a3d81333394c0b323a0006f178"} Nov 25 09:37:56 crc kubenswrapper[4932]: I1125 09:37:56.986361 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pt9jc" event={"ID":"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7","Type":"ContainerStarted","Data":"63716d8b1a0ecbe82359cd587cfbb981183bb9e75529449dd3cd9750a63641cc"} Nov 25 09:37:57 crc kubenswrapper[4932]: I1125 09:37:57.997759 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pt9jc" event={"ID":"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7","Type":"ContainerStarted","Data":"8d279fd6183697c385ef16bd3b9b87e221a0ecba3407e627cb045688e8159fc5"} Nov 25 09:37:59 crc kubenswrapper[4932]: I1125 09:37:59.009683 4932 generic.go:334] "Generic (PLEG): container finished" podID="af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" containerID="8d279fd6183697c385ef16bd3b9b87e221a0ecba3407e627cb045688e8159fc5" exitCode=0 Nov 25 09:37:59 crc kubenswrapper[4932]: I1125 09:37:59.010128 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pt9jc" event={"ID":"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7","Type":"ContainerDied","Data":"8d279fd6183697c385ef16bd3b9b87e221a0ecba3407e627cb045688e8159fc5"} Nov 25 09:38:00 crc kubenswrapper[4932]: I1125 09:38:00.019798 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pt9jc" event={"ID":"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7","Type":"ContainerStarted","Data":"a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb"} Nov 25 09:38:00 crc kubenswrapper[4932]: I1125 09:38:00.050297 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pt9jc" podStartSLOduration=2.510004928 podStartE2EDuration="5.050260001s" podCreationTimestamp="2025-11-25 09:37:55 +0000 UTC" firstStartedPulling="2025-11-25 09:37:56.987909464 +0000 UTC m=+2937.113939027" lastFinishedPulling="2025-11-25 09:37:59.528164537 +0000 UTC m=+2939.654194100" observedRunningTime="2025-11-25 09:38:00.040524214 +0000 UTC m=+2940.166553777" watchObservedRunningTime="2025-11-25 09:38:00.050260001 +0000 UTC m=+2940.176289574" Nov 25 09:38:05 crc kubenswrapper[4932]: I1125 09:38:05.606633 4932 scope.go:117] "RemoveContainer" 
containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:38:05 crc kubenswrapper[4932]: E1125 09:38:05.608072 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:38:06 crc kubenswrapper[4932]: I1125 09:38:06.198338 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:38:06 crc kubenswrapper[4932]: I1125 09:38:06.198439 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:38:06 crc kubenswrapper[4932]: I1125 09:38:06.241642 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:38:07 crc kubenswrapper[4932]: I1125 09:38:07.129930 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:38:07 crc kubenswrapper[4932]: I1125 09:38:07.176988 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pt9jc"] Nov 25 09:38:09 crc kubenswrapper[4932]: I1125 09:38:09.100604 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pt9jc" podUID="af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" containerName="registry-server" containerID="cri-o://a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb" gracePeriod=2 Nov 25 09:38:09 crc kubenswrapper[4932]: I1125 09:38:09.519717 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:38:09 crc kubenswrapper[4932]: I1125 09:38:09.532796 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-utilities\") pod \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\" (UID: \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\") " Nov 25 09:38:09 crc kubenswrapper[4932]: I1125 09:38:09.532926 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2ckv\" (UniqueName: \"kubernetes.io/projected/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-kube-api-access-z2ckv\") pod \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\" (UID: \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\") " Nov 25 09:38:09 crc kubenswrapper[4932]: I1125 09:38:09.532985 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-catalog-content\") pod \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\" (UID: \"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7\") " Nov 25 09:38:09 crc kubenswrapper[4932]: I1125 09:38:09.534384 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-utilities" (OuterVolumeSpecName: "utilities") pod "af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" (UID: "af8de5ba-aef6-4d9f-92c9-c94a8cf357a7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:38:09 crc kubenswrapper[4932]: I1125 09:38:09.540316 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-kube-api-access-z2ckv" (OuterVolumeSpecName: "kube-api-access-z2ckv") pod "af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" (UID: "af8de5ba-aef6-4d9f-92c9-c94a8cf357a7"). InnerVolumeSpecName "kube-api-access-z2ckv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:38:09 crc kubenswrapper[4932]: I1125 09:38:09.589799 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" (UID: "af8de5ba-aef6-4d9f-92c9-c94a8cf357a7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:38:09 crc kubenswrapper[4932]: I1125 09:38:09.634601 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:09 crc kubenswrapper[4932]: I1125 09:38:09.634643 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2ckv\" (UniqueName: \"kubernetes.io/projected/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-kube-api-access-z2ckv\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:09 crc kubenswrapper[4932]: I1125 09:38:09.634657 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.112530 4932 generic.go:334] "Generic (PLEG): container finished" podID="af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" containerID="a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb" exitCode=0 Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.112624 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pt9jc" Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.112687 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pt9jc" event={"ID":"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7","Type":"ContainerDied","Data":"a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb"} Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.113113 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pt9jc" event={"ID":"af8de5ba-aef6-4d9f-92c9-c94a8cf357a7","Type":"ContainerDied","Data":"63716d8b1a0ecbe82359cd587cfbb981183bb9e75529449dd3cd9750a63641cc"} Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.113152 4932 scope.go:117] "RemoveContainer" containerID="a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb" Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.133148 4932 scope.go:117] "RemoveContainer" containerID="8d279fd6183697c385ef16bd3b9b87e221a0ecba3407e627cb045688e8159fc5" Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.148537 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pt9jc"] Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.153768 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pt9jc"] Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.173163 4932 scope.go:117] "RemoveContainer" containerID="95b171682326c246fba98f3c30d3ec54b859f3a3d81333394c0b323a0006f178" Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.197902 4932 scope.go:117] "RemoveContainer" containerID="a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb" Nov 25 09:38:10 crc kubenswrapper[4932]: E1125 09:38:10.198741 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb\": container with ID starting with a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb not found: ID does not exist" containerID="a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb" Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.198774 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb"} err="failed to get container status \"a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb\": rpc error: code = NotFound desc = could not find container \"a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb\": container with ID starting with a3c91fd5342ebc4844cb60117cc3b0f22e255b03478f04a2e84184c60203d8eb not found: ID does not exist" Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.198797 4932 scope.go:117] "RemoveContainer" containerID="8d279fd6183697c385ef16bd3b9b87e221a0ecba3407e627cb045688e8159fc5" Nov 25 09:38:10 crc kubenswrapper[4932]: E1125 09:38:10.199156 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d279fd6183697c385ef16bd3b9b87e221a0ecba3407e627cb045688e8159fc5\": container with ID starting with 8d279fd6183697c385ef16bd3b9b87e221a0ecba3407e627cb045688e8159fc5 not found: ID does not exist" containerID="8d279fd6183697c385ef16bd3b9b87e221a0ecba3407e627cb045688e8159fc5" Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.199175 4932 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d279fd6183697c385ef16bd3b9b87e221a0ecba3407e627cb045688e8159fc5"} err="failed to get container status \"8d279fd6183697c385ef16bd3b9b87e221a0ecba3407e627cb045688e8159fc5\": rpc error: code = NotFound desc = could not find container \"8d279fd6183697c385ef16bd3b9b87e221a0ecba3407e627cb045688e8159fc5\": container with ID starting with 8d279fd6183697c385ef16bd3b9b87e221a0ecba3407e627cb045688e8159fc5 not found: ID does not exist" Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.199203 4932 scope.go:117] "RemoveContainer" containerID="95b171682326c246fba98f3c30d3ec54b859f3a3d81333394c0b323a0006f178" Nov 25 09:38:10 crc kubenswrapper[4932]: E1125 09:38:10.199799 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95b171682326c246fba98f3c30d3ec54b859f3a3d81333394c0b323a0006f178\": container with ID starting with 95b171682326c246fba98f3c30d3ec54b859f3a3d81333394c0b323a0006f178 not found: ID does not exist" containerID="95b171682326c246fba98f3c30d3ec54b859f3a3d81333394c0b323a0006f178" Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.199821 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95b171682326c246fba98f3c30d3ec54b859f3a3d81333394c0b323a0006f178"} err="failed to get container status \"95b171682326c246fba98f3c30d3ec54b859f3a3d81333394c0b323a0006f178\": rpc error: code = NotFound desc = could not find container \"95b171682326c246fba98f3c30d3ec54b859f3a3d81333394c0b323a0006f178\": container with ID starting with 95b171682326c246fba98f3c30d3ec54b859f3a3d81333394c0b323a0006f178 not found: ID does not exist" Nov 25 09:38:10 crc kubenswrapper[4932]: I1125 09:38:10.620952 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" path="/var/lib/kubelet/pods/af8de5ba-aef6-4d9f-92c9-c94a8cf357a7/volumes" Nov 25 09:38:18 crc kubenswrapper[4932]: I1125 09:38:18.606430 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:38:18 crc kubenswrapper[4932]: E1125 09:38:18.607269 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:38:32 crc kubenswrapper[4932]: I1125 09:38:32.606005 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:38:32 crc kubenswrapper[4932]: E1125 09:38:32.606734 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:38:44 crc kubenswrapper[4932]: I1125 09:38:44.607118 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:38:44 crc 
kubenswrapper[4932]: E1125 09:38:44.607969 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:38:59 crc kubenswrapper[4932]: I1125 09:38:59.606400 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:38:59 crc kubenswrapper[4932]: E1125 09:38:59.609145 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:39:13 crc kubenswrapper[4932]: I1125 09:39:13.606852 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:39:13 crc kubenswrapper[4932]: E1125 09:39:13.607974 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:39:27 crc kubenswrapper[4932]: I1125 09:39:27.606949 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:39:27 crc kubenswrapper[4932]: E1125 09:39:27.608225 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:39:39 crc kubenswrapper[4932]: I1125 09:39:39.606228 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:39:39 crc kubenswrapper[4932]: E1125 09:39:39.607542 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.107756 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hfbq8"] Nov 25 09:39:43 crc kubenswrapper[4932]: E1125 09:39:43.108437 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" containerName="extract-utilities" Nov 25 09:39:43 
crc kubenswrapper[4932]: I1125 09:39:43.108450 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" containerName="extract-utilities" Nov 25 09:39:43 crc kubenswrapper[4932]: E1125 09:39:43.108456 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" containerName="extract-content" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.108462 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" containerName="extract-content" Nov 25 09:39:43 crc kubenswrapper[4932]: E1125 09:39:43.108480 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" containerName="registry-server" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.108485 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" containerName="registry-server" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.108650 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="af8de5ba-aef6-4d9f-92c9-c94a8cf357a7" containerName="registry-server" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.109782 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.134803 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hfbq8"] Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.137861 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97d99334-3090-42b4-ab01-a31ac7b60443-catalog-content\") pod \"redhat-marketplace-hfbq8\" (UID: \"97d99334-3090-42b4-ab01-a31ac7b60443\") " pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.137943 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjk9k\" (UniqueName: \"kubernetes.io/projected/97d99334-3090-42b4-ab01-a31ac7b60443-kube-api-access-zjk9k\") pod \"redhat-marketplace-hfbq8\" (UID: \"97d99334-3090-42b4-ab01-a31ac7b60443\") " pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.137970 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97d99334-3090-42b4-ab01-a31ac7b60443-utilities\") pod \"redhat-marketplace-hfbq8\" (UID: \"97d99334-3090-42b4-ab01-a31ac7b60443\") " pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.238708 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjk9k\" (UniqueName: \"kubernetes.io/projected/97d99334-3090-42b4-ab01-a31ac7b60443-kube-api-access-zjk9k\") pod \"redhat-marketplace-hfbq8\" (UID: \"97d99334-3090-42b4-ab01-a31ac7b60443\") " pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.238757 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97d99334-3090-42b4-ab01-a31ac7b60443-utilities\") pod \"redhat-marketplace-hfbq8\" (UID: \"97d99334-3090-42b4-ab01-a31ac7b60443\") " 
pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.238816 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97d99334-3090-42b4-ab01-a31ac7b60443-catalog-content\") pod \"redhat-marketplace-hfbq8\" (UID: \"97d99334-3090-42b4-ab01-a31ac7b60443\") " pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.239339 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97d99334-3090-42b4-ab01-a31ac7b60443-catalog-content\") pod \"redhat-marketplace-hfbq8\" (UID: \"97d99334-3090-42b4-ab01-a31ac7b60443\") " pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.239396 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97d99334-3090-42b4-ab01-a31ac7b60443-utilities\") pod \"redhat-marketplace-hfbq8\" (UID: \"97d99334-3090-42b4-ab01-a31ac7b60443\") " pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.261519 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjk9k\" (UniqueName: \"kubernetes.io/projected/97d99334-3090-42b4-ab01-a31ac7b60443-kube-api-access-zjk9k\") pod \"redhat-marketplace-hfbq8\" (UID: \"97d99334-3090-42b4-ab01-a31ac7b60443\") " pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.437466 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:43 crc kubenswrapper[4932]: I1125 09:39:43.902518 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hfbq8"] Nov 25 09:39:44 crc kubenswrapper[4932]: I1125 09:39:44.849773 4932 generic.go:334] "Generic (PLEG): container finished" podID="97d99334-3090-42b4-ab01-a31ac7b60443" containerID="1240b5cbfd5b2e4437d8980337f86c922fc41eb18a86b842ea9b3a7cbc68f1c2" exitCode=0 Nov 25 09:39:44 crc kubenswrapper[4932]: I1125 09:39:44.850007 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hfbq8" event={"ID":"97d99334-3090-42b4-ab01-a31ac7b60443","Type":"ContainerDied","Data":"1240b5cbfd5b2e4437d8980337f86c922fc41eb18a86b842ea9b3a7cbc68f1c2"} Nov 25 09:39:44 crc kubenswrapper[4932]: I1125 09:39:44.850089 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hfbq8" event={"ID":"97d99334-3090-42b4-ab01-a31ac7b60443","Type":"ContainerStarted","Data":"006ec6b4b0c4c7630b6fb90ae9d71fb9c13851bbbd3b86cf0c89651768df76e9"} Nov 25 09:39:45 crc kubenswrapper[4932]: I1125 09:39:45.863396 4932 generic.go:334] "Generic (PLEG): container finished" podID="97d99334-3090-42b4-ab01-a31ac7b60443" containerID="d22df4efb263e6dfda4930050b39d104f059d282a93306f38dcb5752767d5557" exitCode=0 Nov 25 09:39:45 crc kubenswrapper[4932]: I1125 09:39:45.863612 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hfbq8" event={"ID":"97d99334-3090-42b4-ab01-a31ac7b60443","Type":"ContainerDied","Data":"d22df4efb263e6dfda4930050b39d104f059d282a93306f38dcb5752767d5557"} Nov 25 09:39:46 crc kubenswrapper[4932]: I1125 09:39:46.871410 4932 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/redhat-marketplace-hfbq8" event={"ID":"97d99334-3090-42b4-ab01-a31ac7b60443","Type":"ContainerStarted","Data":"0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95"} Nov 25 09:39:46 crc kubenswrapper[4932]: I1125 09:39:46.887138 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hfbq8" podStartSLOduration=2.432162399 podStartE2EDuration="3.887124576s" podCreationTimestamp="2025-11-25 09:39:43 +0000 UTC" firstStartedPulling="2025-11-25 09:39:44.854938128 +0000 UTC m=+3044.980967691" lastFinishedPulling="2025-11-25 09:39:46.309900295 +0000 UTC m=+3046.435929868" observedRunningTime="2025-11-25 09:39:46.886794087 +0000 UTC m=+3047.012823670" watchObservedRunningTime="2025-11-25 09:39:46.887124576 +0000 UTC m=+3047.013154129" Nov 25 09:39:50 crc kubenswrapper[4932]: I1125 09:39:50.612900 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:39:50 crc kubenswrapper[4932]: E1125 09:39:50.614314 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:39:53 crc kubenswrapper[4932]: I1125 09:39:53.438308 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:53 crc kubenswrapper[4932]: I1125 09:39:53.438875 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:53 crc kubenswrapper[4932]: I1125 09:39:53.484033 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:54 crc kubenswrapper[4932]: I1125 09:39:54.016518 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:54 crc kubenswrapper[4932]: I1125 09:39:54.072867 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hfbq8"] Nov 25 09:39:55 crc kubenswrapper[4932]: I1125 09:39:55.945848 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hfbq8" podUID="97d99334-3090-42b4-ab01-a31ac7b60443" containerName="registry-server" containerID="cri-o://0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95" gracePeriod=2 Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.317834 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.459084 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97d99334-3090-42b4-ab01-a31ac7b60443-catalog-content\") pod \"97d99334-3090-42b4-ab01-a31ac7b60443\" (UID: \"97d99334-3090-42b4-ab01-a31ac7b60443\") " Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.459230 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjk9k\" (UniqueName: \"kubernetes.io/projected/97d99334-3090-42b4-ab01-a31ac7b60443-kube-api-access-zjk9k\") pod \"97d99334-3090-42b4-ab01-a31ac7b60443\" (UID: \"97d99334-3090-42b4-ab01-a31ac7b60443\") " Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.459312 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97d99334-3090-42b4-ab01-a31ac7b60443-utilities\") pod \"97d99334-3090-42b4-ab01-a31ac7b60443\" (UID: \"97d99334-3090-42b4-ab01-a31ac7b60443\") " Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.460832 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97d99334-3090-42b4-ab01-a31ac7b60443-utilities" (OuterVolumeSpecName: "utilities") pod "97d99334-3090-42b4-ab01-a31ac7b60443" (UID: "97d99334-3090-42b4-ab01-a31ac7b60443"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.472488 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97d99334-3090-42b4-ab01-a31ac7b60443-kube-api-access-zjk9k" (OuterVolumeSpecName: "kube-api-access-zjk9k") pod "97d99334-3090-42b4-ab01-a31ac7b60443" (UID: "97d99334-3090-42b4-ab01-a31ac7b60443"). InnerVolumeSpecName "kube-api-access-zjk9k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.479461 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97d99334-3090-42b4-ab01-a31ac7b60443-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "97d99334-3090-42b4-ab01-a31ac7b60443" (UID: "97d99334-3090-42b4-ab01-a31ac7b60443"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.563172 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/97d99334-3090-42b4-ab01-a31ac7b60443-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.563432 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/97d99334-3090-42b4-ab01-a31ac7b60443-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.563509 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjk9k\" (UniqueName: \"kubernetes.io/projected/97d99334-3090-42b4-ab01-a31ac7b60443-kube-api-access-zjk9k\") on node \"crc\" DevicePath \"\"" Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.956865 4932 generic.go:334] "Generic (PLEG): container finished" podID="97d99334-3090-42b4-ab01-a31ac7b60443" containerID="0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95" exitCode=0 Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.956918 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hfbq8" event={"ID":"97d99334-3090-42b4-ab01-a31ac7b60443","Type":"ContainerDied","Data":"0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95"} Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.956946 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hfbq8" Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.956957 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hfbq8" event={"ID":"97d99334-3090-42b4-ab01-a31ac7b60443","Type":"ContainerDied","Data":"006ec6b4b0c4c7630b6fb90ae9d71fb9c13851bbbd3b86cf0c89651768df76e9"} Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.956986 4932 scope.go:117] "RemoveContainer" containerID="0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95" Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.980252 4932 scope.go:117] "RemoveContainer" containerID="d22df4efb263e6dfda4930050b39d104f059d282a93306f38dcb5752767d5557" Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.983077 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hfbq8"] Nov 25 09:39:56 crc kubenswrapper[4932]: I1125 09:39:56.989469 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hfbq8"] Nov 25 09:39:57 crc kubenswrapper[4932]: I1125 09:39:57.000560 4932 scope.go:117] "RemoveContainer" containerID="1240b5cbfd5b2e4437d8980337f86c922fc41eb18a86b842ea9b3a7cbc68f1c2" Nov 25 09:39:57 crc kubenswrapper[4932]: I1125 09:39:57.035178 4932 scope.go:117] "RemoveContainer" containerID="0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95" Nov 25 09:39:57 crc kubenswrapper[4932]: E1125 09:39:57.035722 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95\": container with ID starting with 0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95 not found: ID does not exist" containerID="0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95" Nov 25 09:39:57 crc kubenswrapper[4932]: I1125 09:39:57.035771 4932 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95"} err="failed to get container status \"0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95\": rpc error: code = NotFound desc = could not find container \"0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95\": container with ID starting with 0f9d36e8a6afa63189aef76e92d1dcb50c48123a025eb3ffb8dcec24623ebd95 not found: ID does not exist" Nov 25 09:39:57 crc kubenswrapper[4932]: I1125 09:39:57.035795 4932 scope.go:117] "RemoveContainer" containerID="d22df4efb263e6dfda4930050b39d104f059d282a93306f38dcb5752767d5557" Nov 25 09:39:57 crc kubenswrapper[4932]: E1125 09:39:57.036387 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d22df4efb263e6dfda4930050b39d104f059d282a93306f38dcb5752767d5557\": container with ID starting with d22df4efb263e6dfda4930050b39d104f059d282a93306f38dcb5752767d5557 not found: ID does not exist" containerID="d22df4efb263e6dfda4930050b39d104f059d282a93306f38dcb5752767d5557" Nov 25 09:39:57 crc kubenswrapper[4932]: I1125 09:39:57.036450 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d22df4efb263e6dfda4930050b39d104f059d282a93306f38dcb5752767d5557"} err="failed to get container status \"d22df4efb263e6dfda4930050b39d104f059d282a93306f38dcb5752767d5557\": rpc error: code = NotFound desc = could not find container \"d22df4efb263e6dfda4930050b39d104f059d282a93306f38dcb5752767d5557\": container with ID starting with d22df4efb263e6dfda4930050b39d104f059d282a93306f38dcb5752767d5557 not found: ID does not exist" Nov 25 09:39:57 crc kubenswrapper[4932]: I1125 09:39:57.036492 4932 scope.go:117] "RemoveContainer" containerID="1240b5cbfd5b2e4437d8980337f86c922fc41eb18a86b842ea9b3a7cbc68f1c2" Nov 25 09:39:57 crc kubenswrapper[4932]: E1125 09:39:57.036910 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1240b5cbfd5b2e4437d8980337f86c922fc41eb18a86b842ea9b3a7cbc68f1c2\": container with ID starting with 1240b5cbfd5b2e4437d8980337f86c922fc41eb18a86b842ea9b3a7cbc68f1c2 not found: ID does not exist" containerID="1240b5cbfd5b2e4437d8980337f86c922fc41eb18a86b842ea9b3a7cbc68f1c2" Nov 25 09:39:57 crc kubenswrapper[4932]: I1125 09:39:57.036941 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1240b5cbfd5b2e4437d8980337f86c922fc41eb18a86b842ea9b3a7cbc68f1c2"} err="failed to get container status \"1240b5cbfd5b2e4437d8980337f86c922fc41eb18a86b842ea9b3a7cbc68f1c2\": rpc error: code = NotFound desc = could not find container \"1240b5cbfd5b2e4437d8980337f86c922fc41eb18a86b842ea9b3a7cbc68f1c2\": container with ID starting with 1240b5cbfd5b2e4437d8980337f86c922fc41eb18a86b842ea9b3a7cbc68f1c2 not found: ID does not exist" Nov 25 09:39:58 crc kubenswrapper[4932]: I1125 09:39:58.621679 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97d99334-3090-42b4-ab01-a31ac7b60443" path="/var/lib/kubelet/pods/97d99334-3090-42b4-ab01-a31ac7b60443/volumes" Nov 25 09:40:02 crc kubenswrapper[4932]: I1125 09:40:02.605353 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:40:02 crc kubenswrapper[4932]: E1125 09:40:02.605795 4932 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:40:15 crc kubenswrapper[4932]: I1125 09:40:15.606535 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:40:15 crc kubenswrapper[4932]: E1125 09:40:15.607451 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:40:27 crc kubenswrapper[4932]: I1125 09:40:27.606660 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:40:27 crc kubenswrapper[4932]: E1125 09:40:27.607606 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:40:38 crc kubenswrapper[4932]: I1125 09:40:38.606078 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:40:38 crc kubenswrapper[4932]: E1125 09:40:38.606991 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.198359 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-g9h9x"] Nov 25 09:40:42 crc kubenswrapper[4932]: E1125 09:40:42.200053 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97d99334-3090-42b4-ab01-a31ac7b60443" containerName="registry-server" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.200164 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="97d99334-3090-42b4-ab01-a31ac7b60443" containerName="registry-server" Nov 25 09:40:42 crc kubenswrapper[4932]: E1125 09:40:42.200275 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97d99334-3090-42b4-ab01-a31ac7b60443" containerName="extract-content" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.200352 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="97d99334-3090-42b4-ab01-a31ac7b60443" containerName="extract-content" Nov 25 09:40:42 crc kubenswrapper[4932]: E1125 09:40:42.200451 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97d99334-3090-42b4-ab01-a31ac7b60443" 
containerName="extract-utilities" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.200519 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="97d99334-3090-42b4-ab01-a31ac7b60443" containerName="extract-utilities" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.200768 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="97d99334-3090-42b4-ab01-a31ac7b60443" containerName="registry-server" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.205734 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.207735 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g9h9x"] Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.315019 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1effeeb4-6a39-404b-8804-2bf843301fbf-utilities\") pod \"certified-operators-g9h9x\" (UID: \"1effeeb4-6a39-404b-8804-2bf843301fbf\") " pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.315095 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1effeeb4-6a39-404b-8804-2bf843301fbf-catalog-content\") pod \"certified-operators-g9h9x\" (UID: \"1effeeb4-6a39-404b-8804-2bf843301fbf\") " pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.315267 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfhdw\" (UniqueName: \"kubernetes.io/projected/1effeeb4-6a39-404b-8804-2bf843301fbf-kube-api-access-pfhdw\") pod \"certified-operators-g9h9x\" (UID: \"1effeeb4-6a39-404b-8804-2bf843301fbf\") " pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.415976 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfhdw\" (UniqueName: \"kubernetes.io/projected/1effeeb4-6a39-404b-8804-2bf843301fbf-kube-api-access-pfhdw\") pod \"certified-operators-g9h9x\" (UID: \"1effeeb4-6a39-404b-8804-2bf843301fbf\") " pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.416076 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1effeeb4-6a39-404b-8804-2bf843301fbf-utilities\") pod \"certified-operators-g9h9x\" (UID: \"1effeeb4-6a39-404b-8804-2bf843301fbf\") " pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.416104 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1effeeb4-6a39-404b-8804-2bf843301fbf-catalog-content\") pod \"certified-operators-g9h9x\" (UID: \"1effeeb4-6a39-404b-8804-2bf843301fbf\") " pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.416684 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1effeeb4-6a39-404b-8804-2bf843301fbf-catalog-content\") pod \"certified-operators-g9h9x\" (UID: 
\"1effeeb4-6a39-404b-8804-2bf843301fbf\") " pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.416804 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1effeeb4-6a39-404b-8804-2bf843301fbf-utilities\") pod \"certified-operators-g9h9x\" (UID: \"1effeeb4-6a39-404b-8804-2bf843301fbf\") " pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.435700 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfhdw\" (UniqueName: \"kubernetes.io/projected/1effeeb4-6a39-404b-8804-2bf843301fbf-kube-api-access-pfhdw\") pod \"certified-operators-g9h9x\" (UID: \"1effeeb4-6a39-404b-8804-2bf843301fbf\") " pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.529439 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:42 crc kubenswrapper[4932]: I1125 09:40:42.829045 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g9h9x"] Nov 25 09:40:43 crc kubenswrapper[4932]: I1125 09:40:43.313075 4932 generic.go:334] "Generic (PLEG): container finished" podID="1effeeb4-6a39-404b-8804-2bf843301fbf" containerID="66400b9758e4badcc92a5954dc28db397735ed8dad2e303ea1b7707aaece1b95" exitCode=0 Nov 25 09:40:43 crc kubenswrapper[4932]: I1125 09:40:43.313156 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g9h9x" event={"ID":"1effeeb4-6a39-404b-8804-2bf843301fbf","Type":"ContainerDied","Data":"66400b9758e4badcc92a5954dc28db397735ed8dad2e303ea1b7707aaece1b95"} Nov 25 09:40:43 crc kubenswrapper[4932]: I1125 09:40:43.313604 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g9h9x" event={"ID":"1effeeb4-6a39-404b-8804-2bf843301fbf","Type":"ContainerStarted","Data":"3fbbe3827e882bde7058eaad739286f7aa036022209ea3998942c7b95ae0273d"} Nov 25 09:40:43 crc kubenswrapper[4932]: I1125 09:40:43.316053 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 09:40:45 crc kubenswrapper[4932]: I1125 09:40:45.333240 4932 generic.go:334] "Generic (PLEG): container finished" podID="1effeeb4-6a39-404b-8804-2bf843301fbf" containerID="5fc870af199d176fe903d577632d3d2219e8ed38eb77e13e181a63ebff6edcb7" exitCode=0 Nov 25 09:40:45 crc kubenswrapper[4932]: I1125 09:40:45.333363 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g9h9x" event={"ID":"1effeeb4-6a39-404b-8804-2bf843301fbf","Type":"ContainerDied","Data":"5fc870af199d176fe903d577632d3d2219e8ed38eb77e13e181a63ebff6edcb7"} Nov 25 09:40:46 crc kubenswrapper[4932]: I1125 09:40:46.344109 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g9h9x" event={"ID":"1effeeb4-6a39-404b-8804-2bf843301fbf","Type":"ContainerStarted","Data":"21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82"} Nov 25 09:40:46 crc kubenswrapper[4932]: I1125 09:40:46.379850 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-g9h9x" podStartSLOduration=1.930102635 podStartE2EDuration="4.379810623s" podCreationTimestamp="2025-11-25 09:40:42 +0000 UTC" firstStartedPulling="2025-11-25 
09:40:43.3158144 +0000 UTC m=+3103.441843963" lastFinishedPulling="2025-11-25 09:40:45.765522388 +0000 UTC m=+3105.891551951" observedRunningTime="2025-11-25 09:40:46.372937178 +0000 UTC m=+3106.498966751" watchObservedRunningTime="2025-11-25 09:40:46.379810623 +0000 UTC m=+3106.505840186" Nov 25 09:40:52 crc kubenswrapper[4932]: I1125 09:40:52.530362 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:52 crc kubenswrapper[4932]: I1125 09:40:52.530761 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:52 crc kubenswrapper[4932]: I1125 09:40:52.584708 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:52 crc kubenswrapper[4932]: I1125 09:40:52.606518 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:40:52 crc kubenswrapper[4932]: E1125 09:40:52.606861 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:40:53 crc kubenswrapper[4932]: I1125 09:40:53.473510 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:53 crc kubenswrapper[4932]: I1125 09:40:53.523539 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g9h9x"] Nov 25 09:40:55 crc kubenswrapper[4932]: I1125 09:40:55.431044 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-g9h9x" podUID="1effeeb4-6a39-404b-8804-2bf843301fbf" containerName="registry-server" containerID="cri-o://21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82" gracePeriod=2 Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.345669 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.442456 4932 generic.go:334] "Generic (PLEG): container finished" podID="1effeeb4-6a39-404b-8804-2bf843301fbf" containerID="21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82" exitCode=0 Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.442584 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-g9h9x" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.442558 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g9h9x" event={"ID":"1effeeb4-6a39-404b-8804-2bf843301fbf","Type":"ContainerDied","Data":"21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82"} Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.442800 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g9h9x" event={"ID":"1effeeb4-6a39-404b-8804-2bf843301fbf","Type":"ContainerDied","Data":"3fbbe3827e882bde7058eaad739286f7aa036022209ea3998942c7b95ae0273d"} Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.442841 4932 scope.go:117] "RemoveContainer" containerID="21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.451436 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfhdw\" (UniqueName: \"kubernetes.io/projected/1effeeb4-6a39-404b-8804-2bf843301fbf-kube-api-access-pfhdw\") pod \"1effeeb4-6a39-404b-8804-2bf843301fbf\" (UID: \"1effeeb4-6a39-404b-8804-2bf843301fbf\") " Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.451568 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1effeeb4-6a39-404b-8804-2bf843301fbf-catalog-content\") pod \"1effeeb4-6a39-404b-8804-2bf843301fbf\" (UID: \"1effeeb4-6a39-404b-8804-2bf843301fbf\") " Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.451666 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1effeeb4-6a39-404b-8804-2bf843301fbf-utilities\") pod \"1effeeb4-6a39-404b-8804-2bf843301fbf\" (UID: \"1effeeb4-6a39-404b-8804-2bf843301fbf\") " Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.453361 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1effeeb4-6a39-404b-8804-2bf843301fbf-utilities" (OuterVolumeSpecName: "utilities") pod "1effeeb4-6a39-404b-8804-2bf843301fbf" (UID: "1effeeb4-6a39-404b-8804-2bf843301fbf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.457889 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1effeeb4-6a39-404b-8804-2bf843301fbf-kube-api-access-pfhdw" (OuterVolumeSpecName: "kube-api-access-pfhdw") pod "1effeeb4-6a39-404b-8804-2bf843301fbf" (UID: "1effeeb4-6a39-404b-8804-2bf843301fbf"). InnerVolumeSpecName "kube-api-access-pfhdw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.462962 4932 scope.go:117] "RemoveContainer" containerID="5fc870af199d176fe903d577632d3d2219e8ed38eb77e13e181a63ebff6edcb7" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.501557 4932 scope.go:117] "RemoveContainer" containerID="66400b9758e4badcc92a5954dc28db397735ed8dad2e303ea1b7707aaece1b95" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.507578 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1effeeb4-6a39-404b-8804-2bf843301fbf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1effeeb4-6a39-404b-8804-2bf843301fbf" (UID: "1effeeb4-6a39-404b-8804-2bf843301fbf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.529439 4932 scope.go:117] "RemoveContainer" containerID="21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82" Nov 25 09:40:56 crc kubenswrapper[4932]: E1125 09:40:56.530527 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82\": container with ID starting with 21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82 not found: ID does not exist" containerID="21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.530610 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82"} err="failed to get container status \"21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82\": rpc error: code = NotFound desc = could not find container \"21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82\": container with ID starting with 21fb83fac2cb905067bc3e6d8bd3cb1b96938977e2690ef62a79c0449f886d82 not found: ID does not exist" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.530653 4932 scope.go:117] "RemoveContainer" containerID="5fc870af199d176fe903d577632d3d2219e8ed38eb77e13e181a63ebff6edcb7" Nov 25 09:40:56 crc kubenswrapper[4932]: E1125 09:40:56.531213 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fc870af199d176fe903d577632d3d2219e8ed38eb77e13e181a63ebff6edcb7\": container with ID starting with 5fc870af199d176fe903d577632d3d2219e8ed38eb77e13e181a63ebff6edcb7 not found: ID does not exist" containerID="5fc870af199d176fe903d577632d3d2219e8ed38eb77e13e181a63ebff6edcb7" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.531289 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fc870af199d176fe903d577632d3d2219e8ed38eb77e13e181a63ebff6edcb7"} err="failed to get container status \"5fc870af199d176fe903d577632d3d2219e8ed38eb77e13e181a63ebff6edcb7\": rpc error: code = NotFound desc = could not find container \"5fc870af199d176fe903d577632d3d2219e8ed38eb77e13e181a63ebff6edcb7\": container with ID starting with 5fc870af199d176fe903d577632d3d2219e8ed38eb77e13e181a63ebff6edcb7 not found: ID does not exist" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.531336 4932 scope.go:117] "RemoveContainer" containerID="66400b9758e4badcc92a5954dc28db397735ed8dad2e303ea1b7707aaece1b95" Nov 25 09:40:56 crc kubenswrapper[4932]: 
E1125 09:40:56.531823 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66400b9758e4badcc92a5954dc28db397735ed8dad2e303ea1b7707aaece1b95\": container with ID starting with 66400b9758e4badcc92a5954dc28db397735ed8dad2e303ea1b7707aaece1b95 not found: ID does not exist" containerID="66400b9758e4badcc92a5954dc28db397735ed8dad2e303ea1b7707aaece1b95" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.531851 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66400b9758e4badcc92a5954dc28db397735ed8dad2e303ea1b7707aaece1b95"} err="failed to get container status \"66400b9758e4badcc92a5954dc28db397735ed8dad2e303ea1b7707aaece1b95\": rpc error: code = NotFound desc = could not find container \"66400b9758e4badcc92a5954dc28db397735ed8dad2e303ea1b7707aaece1b95\": container with ID starting with 66400b9758e4badcc92a5954dc28db397735ed8dad2e303ea1b7707aaece1b95 not found: ID does not exist" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.554038 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfhdw\" (UniqueName: \"kubernetes.io/projected/1effeeb4-6a39-404b-8804-2bf843301fbf-kube-api-access-pfhdw\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.554079 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1effeeb4-6a39-404b-8804-2bf843301fbf-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.554092 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1effeeb4-6a39-404b-8804-2bf843301fbf-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.772472 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g9h9x"] Nov 25 09:40:56 crc kubenswrapper[4932]: I1125 09:40:56.776495 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-g9h9x"] Nov 25 09:40:58 crc kubenswrapper[4932]: I1125 09:40:58.619180 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1effeeb4-6a39-404b-8804-2bf843301fbf" path="/var/lib/kubelet/pods/1effeeb4-6a39-404b-8804-2bf843301fbf/volumes" Nov 25 09:41:04 crc kubenswrapper[4932]: I1125 09:41:04.605608 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:41:04 crc kubenswrapper[4932]: E1125 09:41:04.606154 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:41:16 crc kubenswrapper[4932]: I1125 09:41:16.606153 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:41:16 crc kubenswrapper[4932]: E1125 09:41:16.608323 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:41:27 crc kubenswrapper[4932]: I1125 09:41:27.606692 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:41:27 crc kubenswrapper[4932]: E1125 09:41:27.607881 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:41:40 crc kubenswrapper[4932]: I1125 09:41:40.609556 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:41:40 crc kubenswrapper[4932]: E1125 09:41:40.610208 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:41:52 crc kubenswrapper[4932]: I1125 09:41:52.605912 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:41:52 crc kubenswrapper[4932]: E1125 09:41:52.607649 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:42:04 crc kubenswrapper[4932]: I1125 09:42:04.606714 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:42:04 crc kubenswrapper[4932]: E1125 09:42:04.607594 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:42:15 crc kubenswrapper[4932]: I1125 09:42:15.606403 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:42:15 crc kubenswrapper[4932]: E1125 09:42:15.608633 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" 
podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:42:28 crc kubenswrapper[4932]: I1125 09:42:28.606305 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:42:28 crc kubenswrapper[4932]: E1125 09:42:28.606965 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:42:41 crc kubenswrapper[4932]: I1125 09:42:41.606157 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7" Nov 25 09:42:42 crc kubenswrapper[4932]: I1125 09:42:42.275708 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"56e4dc7a0ec60533fb570925012d5aeb8693e4583bf0594f9180970938e1bab7"} Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.178833 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk"] Nov 25 09:45:00 crc kubenswrapper[4932]: E1125 09:45:00.179885 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1effeeb4-6a39-404b-8804-2bf843301fbf" containerName="registry-server" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.179906 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1effeeb4-6a39-404b-8804-2bf843301fbf" containerName="registry-server" Nov 25 09:45:00 crc kubenswrapper[4932]: E1125 09:45:00.179951 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1effeeb4-6a39-404b-8804-2bf843301fbf" containerName="extract-utilities" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.179961 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1effeeb4-6a39-404b-8804-2bf843301fbf" containerName="extract-utilities" Nov 25 09:45:00 crc kubenswrapper[4932]: E1125 09:45:00.179984 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1effeeb4-6a39-404b-8804-2bf843301fbf" containerName="extract-content" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.179991 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1effeeb4-6a39-404b-8804-2bf843301fbf" containerName="extract-content" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.180176 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="1effeeb4-6a39-404b-8804-2bf843301fbf" containerName="registry-server" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.182297 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.185280 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.186755 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.191852 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk"] Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.331181 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79047c8e-3944-490c-bc62-61352910d301-config-volume\") pod \"collect-profiles-29401065-mb8gk\" (UID: \"79047c8e-3944-490c-bc62-61352910d301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.331279 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79047c8e-3944-490c-bc62-61352910d301-secret-volume\") pod \"collect-profiles-29401065-mb8gk\" (UID: \"79047c8e-3944-490c-bc62-61352910d301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.331310 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4czgn\" (UniqueName: \"kubernetes.io/projected/79047c8e-3944-490c-bc62-61352910d301-kube-api-access-4czgn\") pod \"collect-profiles-29401065-mb8gk\" (UID: \"79047c8e-3944-490c-bc62-61352910d301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.432472 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79047c8e-3944-490c-bc62-61352910d301-config-volume\") pod \"collect-profiles-29401065-mb8gk\" (UID: \"79047c8e-3944-490c-bc62-61352910d301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.432547 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79047c8e-3944-490c-bc62-61352910d301-secret-volume\") pod \"collect-profiles-29401065-mb8gk\" (UID: \"79047c8e-3944-490c-bc62-61352910d301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.432573 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4czgn\" (UniqueName: \"kubernetes.io/projected/79047c8e-3944-490c-bc62-61352910d301-kube-api-access-4czgn\") pod \"collect-profiles-29401065-mb8gk\" (UID: \"79047c8e-3944-490c-bc62-61352910d301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.433875 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79047c8e-3944-490c-bc62-61352910d301-config-volume\") pod 
\"collect-profiles-29401065-mb8gk\" (UID: \"79047c8e-3944-490c-bc62-61352910d301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.440890 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79047c8e-3944-490c-bc62-61352910d301-secret-volume\") pod \"collect-profiles-29401065-mb8gk\" (UID: \"79047c8e-3944-490c-bc62-61352910d301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.450010 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4czgn\" (UniqueName: \"kubernetes.io/projected/79047c8e-3944-490c-bc62-61352910d301-kube-api-access-4czgn\") pod \"collect-profiles-29401065-mb8gk\" (UID: \"79047c8e-3944-490c-bc62-61352910d301\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.511271 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:00 crc kubenswrapper[4932]: I1125 09:45:00.979565 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk"] Nov 25 09:45:01 crc kubenswrapper[4932]: I1125 09:45:01.386556 4932 generic.go:334] "Generic (PLEG): container finished" podID="79047c8e-3944-490c-bc62-61352910d301" containerID="88d49f8d236adada4de921553e9c57d3d2dcb4e14fbb0aed8996708b09e3c744" exitCode=0 Nov 25 09:45:01 crc kubenswrapper[4932]: I1125 09:45:01.386791 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" event={"ID":"79047c8e-3944-490c-bc62-61352910d301","Type":"ContainerDied","Data":"88d49f8d236adada4de921553e9c57d3d2dcb4e14fbb0aed8996708b09e3c744"} Nov 25 09:45:01 crc kubenswrapper[4932]: I1125 09:45:01.387071 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" event={"ID":"79047c8e-3944-490c-bc62-61352910d301","Type":"ContainerStarted","Data":"5d7a4342568f91df8ac02334dccead6020027427d240f3ff162093535db77b3b"} Nov 25 09:45:02 crc kubenswrapper[4932]: I1125 09:45:02.828135 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:02 crc kubenswrapper[4932]: I1125 09:45:02.929647 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4czgn\" (UniqueName: \"kubernetes.io/projected/79047c8e-3944-490c-bc62-61352910d301-kube-api-access-4czgn\") pod \"79047c8e-3944-490c-bc62-61352910d301\" (UID: \"79047c8e-3944-490c-bc62-61352910d301\") " Nov 25 09:45:02 crc kubenswrapper[4932]: I1125 09:45:02.929756 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79047c8e-3944-490c-bc62-61352910d301-secret-volume\") pod \"79047c8e-3944-490c-bc62-61352910d301\" (UID: \"79047c8e-3944-490c-bc62-61352910d301\") " Nov 25 09:45:02 crc kubenswrapper[4932]: I1125 09:45:02.929838 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79047c8e-3944-490c-bc62-61352910d301-config-volume\") pod \"79047c8e-3944-490c-bc62-61352910d301\" (UID: \"79047c8e-3944-490c-bc62-61352910d301\") " Nov 25 09:45:02 crc kubenswrapper[4932]: I1125 09:45:02.930699 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79047c8e-3944-490c-bc62-61352910d301-config-volume" (OuterVolumeSpecName: "config-volume") pod "79047c8e-3944-490c-bc62-61352910d301" (UID: "79047c8e-3944-490c-bc62-61352910d301"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 09:45:02 crc kubenswrapper[4932]: I1125 09:45:02.936312 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79047c8e-3944-490c-bc62-61352910d301-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "79047c8e-3944-490c-bc62-61352910d301" (UID: "79047c8e-3944-490c-bc62-61352910d301"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 09:45:02 crc kubenswrapper[4932]: I1125 09:45:02.942998 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79047c8e-3944-490c-bc62-61352910d301-kube-api-access-4czgn" (OuterVolumeSpecName: "kube-api-access-4czgn") pod "79047c8e-3944-490c-bc62-61352910d301" (UID: "79047c8e-3944-490c-bc62-61352910d301"). InnerVolumeSpecName "kube-api-access-4czgn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:45:03 crc kubenswrapper[4932]: I1125 09:45:03.031866 4932 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/79047c8e-3944-490c-bc62-61352910d301-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:03 crc kubenswrapper[4932]: I1125 09:45:03.031908 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4czgn\" (UniqueName: \"kubernetes.io/projected/79047c8e-3944-490c-bc62-61352910d301-kube-api-access-4czgn\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:03 crc kubenswrapper[4932]: I1125 09:45:03.031921 4932 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/79047c8e-3944-490c-bc62-61352910d301-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 09:45:03 crc kubenswrapper[4932]: I1125 09:45:03.406466 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" event={"ID":"79047c8e-3944-490c-bc62-61352910d301","Type":"ContainerDied","Data":"5d7a4342568f91df8ac02334dccead6020027427d240f3ff162093535db77b3b"} Nov 25 09:45:03 crc kubenswrapper[4932]: I1125 09:45:03.406511 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d7a4342568f91df8ac02334dccead6020027427d240f3ff162093535db77b3b" Nov 25 09:45:03 crc kubenswrapper[4932]: I1125 09:45:03.406585 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk" Nov 25 09:45:03 crc kubenswrapper[4932]: I1125 09:45:03.904175 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r"] Nov 25 09:45:03 crc kubenswrapper[4932]: I1125 09:45:03.909801 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401020-6gx8r"] Nov 25 09:45:04 crc kubenswrapper[4932]: I1125 09:45:04.615051 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fb98ca2-1d66-4de8-b842-2cbf51c82530" path="/var/lib/kubelet/pods/4fb98ca2-1d66-4de8-b842-2cbf51c82530/volumes" Nov 25 09:45:07 crc kubenswrapper[4932]: I1125 09:45:07.181207 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:45:07 crc kubenswrapper[4932]: I1125 09:45:07.181788 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:45:08 crc kubenswrapper[4932]: I1125 09:45:08.028024 4932 scope.go:117] "RemoveContainer" containerID="0e1064c5d6796c880e8232396383ce88d1f8d0581350bca7a5bdab8ced5974a6" Nov 25 09:45:37 crc kubenswrapper[4932]: I1125 09:45:37.181229 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
Nov 25 09:45:37 crc kubenswrapper[4932]: I1125 09:45:37.181905 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:46:07 crc kubenswrapper[4932]: I1125 09:46:07.180623 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:46:07 crc kubenswrapper[4932]: I1125 09:46:07.181338 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:46:07 crc kubenswrapper[4932]: I1125 09:46:07.181393 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh"
Nov 25 09:46:07 crc kubenswrapper[4932]: I1125 09:46:07.182128 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"56e4dc7a0ec60533fb570925012d5aeb8693e4583bf0594f9180970938e1bab7"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 09:46:07 crc kubenswrapper[4932]: I1125 09:46:07.182185 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://56e4dc7a0ec60533fb570925012d5aeb8693e4583bf0594f9180970938e1bab7" gracePeriod=600
Nov 25 09:46:08 crc kubenswrapper[4932]: I1125 09:46:08.005377 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="56e4dc7a0ec60533fb570925012d5aeb8693e4583bf0594f9180970938e1bab7" exitCode=0
Nov 25 09:46:08 crc kubenswrapper[4932]: I1125 09:46:08.005461 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"56e4dc7a0ec60533fb570925012d5aeb8693e4583bf0594f9180970938e1bab7"}
Nov 25 09:46:08 crc kubenswrapper[4932]: I1125 09:46:08.006039 4932 scope.go:117] "RemoveContainer" containerID="ec835ea898700287554046cf2c7b2f1bded21fe45967f7cb89a89a8fa73103e7"
Nov 25 09:46:09 crc kubenswrapper[4932]: I1125 09:46:09.018329 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa"}
Nov 25 09:48:37 crc kubenswrapper[4932]: I1125 09:48:37.181529 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:48:37 crc kubenswrapper[4932]: I1125 09:48:37.183566 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:49:07 crc kubenswrapper[4932]: I1125 09:49:07.180941 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 09:49:07 crc kubenswrapper[4932]: I1125 09:49:07.181847 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.301266 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xknwp"]
Nov 25 09:49:11 crc kubenswrapper[4932]: E1125 09:49:11.302116 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79047c8e-3944-490c-bc62-61352910d301" containerName="collect-profiles"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.302131 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="79047c8e-3944-490c-bc62-61352910d301" containerName="collect-profiles"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.302304 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="79047c8e-3944-490c-bc62-61352910d301" containerName="collect-profiles"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.312056 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xknwp"]
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.312159 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.468672 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-catalog-content\") pod \"community-operators-xknwp\" (UID: \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\") " pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.468725 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgrmp\" (UniqueName: \"kubernetes.io/projected/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-kube-api-access-rgrmp\") pod \"community-operators-xknwp\" (UID: \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\") " pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.468903 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-utilities\") pod \"community-operators-xknwp\" (UID: \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\") " pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.570913 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-utilities\") pod \"community-operators-xknwp\" (UID: \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\") " pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.571027 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-catalog-content\") pod \"community-operators-xknwp\" (UID: \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\") " pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.571069 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgrmp\" (UniqueName: \"kubernetes.io/projected/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-kube-api-access-rgrmp\") pod \"community-operators-xknwp\" (UID: \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\") " pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.571576 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-utilities\") pod \"community-operators-xknwp\" (UID: \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\") " pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.571906 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-catalog-content\") pod \"community-operators-xknwp\" (UID: \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\") " pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.593651 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgrmp\" (UniqueName: \"kubernetes.io/projected/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-kube-api-access-rgrmp\") pod \"community-operators-xknwp\" (UID: \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\") " pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:11 crc kubenswrapper[4932]: I1125 09:49:11.631770 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:12 crc kubenswrapper[4932]: I1125 09:49:12.126121 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xknwp"]
Nov 25 09:49:12 crc kubenswrapper[4932]: I1125 09:49:12.485356 4932 generic.go:334] "Generic (PLEG): container finished" podID="b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" containerID="e9daebf80fe26786a080e150fd3b592569c78819e51986e76238cd32c790e3d2" exitCode=0
Nov 25 09:49:12 crc kubenswrapper[4932]: I1125 09:49:12.485409 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xknwp" event={"ID":"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b","Type":"ContainerDied","Data":"e9daebf80fe26786a080e150fd3b592569c78819e51986e76238cd32c790e3d2"}
Nov 25 09:49:12 crc kubenswrapper[4932]: I1125 09:49:12.485697 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xknwp" event={"ID":"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b","Type":"ContainerStarted","Data":"01d1edfcf6bf898e8ff7a3aabce98dc750ce7d47ef829d51e3f2828f31bd0afc"}
Nov 25 09:49:12 crc kubenswrapper[4932]: I1125 09:49:12.487624 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 09:49:14 crc kubenswrapper[4932]: I1125 09:49:14.501180 4932 generic.go:334] "Generic (PLEG): container finished" podID="b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" containerID="b03aa93fbe1a79bcd92710eb8cbd21719de879366cac0cea45ffc26313645e19" exitCode=0
Nov 25 09:49:14 crc kubenswrapper[4932]: I1125 09:49:14.501586 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xknwp" event={"ID":"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b","Type":"ContainerDied","Data":"b03aa93fbe1a79bcd92710eb8cbd21719de879366cac0cea45ffc26313645e19"}
Nov 25 09:49:15 crc kubenswrapper[4932]: I1125 09:49:15.511599 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xknwp" event={"ID":"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b","Type":"ContainerStarted","Data":"d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4"}
Nov 25 09:49:15 crc kubenswrapper[4932]: I1125 09:49:15.535801 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xknwp" podStartSLOduration=1.741270554 podStartE2EDuration="4.535779768s" podCreationTimestamp="2025-11-25 09:49:11 +0000 UTC" firstStartedPulling="2025-11-25 09:49:12.487411479 +0000 UTC m=+3612.613441032" lastFinishedPulling="2025-11-25 09:49:15.281920673 +0000 UTC m=+3615.407950246" observedRunningTime="2025-11-25 09:49:15.533298718 +0000 UTC m=+3615.659328301" watchObservedRunningTime="2025-11-25 09:49:15.535779768 +0000 UTC m=+3615.661809331"
Nov 25 09:49:21 crc kubenswrapper[4932]: I1125 09:49:21.632393 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:21 crc kubenswrapper[4932]: I1125 09:49:21.632934 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xknwp"
Nov 25 09:49:21 crc kubenswrapper[4932]: I1125 09:49:21.680117 4932 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xknwp" Nov 25 09:49:22 crc kubenswrapper[4932]: I1125 09:49:22.618454 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xknwp" Nov 25 09:49:22 crc kubenswrapper[4932]: I1125 09:49:22.660467 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xknwp"] Nov 25 09:49:24 crc kubenswrapper[4932]: I1125 09:49:24.591459 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xknwp" podUID="b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" containerName="registry-server" containerID="cri-o://d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4" gracePeriod=2 Nov 25 09:49:24 crc kubenswrapper[4932]: I1125 09:49:24.964230 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xknwp" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.082876 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgrmp\" (UniqueName: \"kubernetes.io/projected/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-kube-api-access-rgrmp\") pod \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\" (UID: \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\") " Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.083479 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-catalog-content\") pod \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\" (UID: \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\") " Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.083577 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-utilities\") pod \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\" (UID: \"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b\") " Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.084328 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-utilities" (OuterVolumeSpecName: "utilities") pod "b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" (UID: "b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.089312 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-kube-api-access-rgrmp" (OuterVolumeSpecName: "kube-api-access-rgrmp") pod "b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" (UID: "b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b"). InnerVolumeSpecName "kube-api-access-rgrmp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.184654 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.184698 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgrmp\" (UniqueName: \"kubernetes.io/projected/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-kube-api-access-rgrmp\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.227886 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" (UID: "b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.286031 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.599592 4932 generic.go:334] "Generic (PLEG): container finished" podID="b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" containerID="d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4" exitCode=0 Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.599644 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xknwp" event={"ID":"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b","Type":"ContainerDied","Data":"d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4"} Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.599722 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xknwp" event={"ID":"b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b","Type":"ContainerDied","Data":"01d1edfcf6bf898e8ff7a3aabce98dc750ce7d47ef829d51e3f2828f31bd0afc"} Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.599746 4932 scope.go:117] "RemoveContainer" containerID="d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.599683 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xknwp" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.632480 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xknwp"] Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.638135 4932 scope.go:117] "RemoveContainer" containerID="b03aa93fbe1a79bcd92710eb8cbd21719de879366cac0cea45ffc26313645e19" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.638238 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xknwp"] Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.660886 4932 scope.go:117] "RemoveContainer" containerID="e9daebf80fe26786a080e150fd3b592569c78819e51986e76238cd32c790e3d2" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.682765 4932 scope.go:117] "RemoveContainer" containerID="d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4" Nov 25 09:49:25 crc kubenswrapper[4932]: E1125 09:49:25.683311 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4\": container with ID starting with d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4 not found: ID does not exist" containerID="d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.683340 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4"} err="failed to get container status \"d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4\": rpc error: code = NotFound desc = could not find container \"d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4\": container with ID starting with d6b918684c680e26d6c537d2f52ce4e88a52b32a13c2887cef42bcd8b5fe5fd4 not found: ID does not exist" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.683381 4932 scope.go:117] "RemoveContainer" containerID="b03aa93fbe1a79bcd92710eb8cbd21719de879366cac0cea45ffc26313645e19" Nov 25 09:49:25 crc kubenswrapper[4932]: E1125 09:49:25.684028 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b03aa93fbe1a79bcd92710eb8cbd21719de879366cac0cea45ffc26313645e19\": container with ID starting with b03aa93fbe1a79bcd92710eb8cbd21719de879366cac0cea45ffc26313645e19 not found: ID does not exist" containerID="b03aa93fbe1a79bcd92710eb8cbd21719de879366cac0cea45ffc26313645e19" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.684078 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b03aa93fbe1a79bcd92710eb8cbd21719de879366cac0cea45ffc26313645e19"} err="failed to get container status \"b03aa93fbe1a79bcd92710eb8cbd21719de879366cac0cea45ffc26313645e19\": rpc error: code = NotFound desc = could not find container \"b03aa93fbe1a79bcd92710eb8cbd21719de879366cac0cea45ffc26313645e19\": container with ID starting with b03aa93fbe1a79bcd92710eb8cbd21719de879366cac0cea45ffc26313645e19 not found: ID does not exist" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.684114 4932 scope.go:117] "RemoveContainer" containerID="e9daebf80fe26786a080e150fd3b592569c78819e51986e76238cd32c790e3d2" Nov 25 09:49:25 crc kubenswrapper[4932]: E1125 09:49:25.684579 4932 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"e9daebf80fe26786a080e150fd3b592569c78819e51986e76238cd32c790e3d2\": container with ID starting with e9daebf80fe26786a080e150fd3b592569c78819e51986e76238cd32c790e3d2 not found: ID does not exist" containerID="e9daebf80fe26786a080e150fd3b592569c78819e51986e76238cd32c790e3d2" Nov 25 09:49:25 crc kubenswrapper[4932]: I1125 09:49:25.684622 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e9daebf80fe26786a080e150fd3b592569c78819e51986e76238cd32c790e3d2"} err="failed to get container status \"e9daebf80fe26786a080e150fd3b592569c78819e51986e76238cd32c790e3d2\": rpc error: code = NotFound desc = could not find container \"e9daebf80fe26786a080e150fd3b592569c78819e51986e76238cd32c790e3d2\": container with ID starting with e9daebf80fe26786a080e150fd3b592569c78819e51986e76238cd32c790e3d2 not found: ID does not exist" Nov 25 09:49:26 crc kubenswrapper[4932]: I1125 09:49:26.614692 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" path="/var/lib/kubelet/pods/b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b/volumes" Nov 25 09:49:37 crc kubenswrapper[4932]: I1125 09:49:37.181159 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:49:37 crc kubenswrapper[4932]: I1125 09:49:37.181952 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:49:37 crc kubenswrapper[4932]: I1125 09:49:37.182007 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 09:49:37 crc kubenswrapper[4932]: I1125 09:49:37.182613 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:49:37 crc kubenswrapper[4932]: I1125 09:49:37.182663 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" gracePeriod=600 Nov 25 09:49:37 crc kubenswrapper[4932]: E1125 09:49:37.331228 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:49:37 crc kubenswrapper[4932]: I1125 09:49:37.724397 4932 generic.go:334] 
"Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" exitCode=0 Nov 25 09:49:37 crc kubenswrapper[4932]: I1125 09:49:37.724519 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa"} Nov 25 09:49:37 crc kubenswrapper[4932]: I1125 09:49:37.724859 4932 scope.go:117] "RemoveContainer" containerID="56e4dc7a0ec60533fb570925012d5aeb8693e4583bf0594f9180970938e1bab7" Nov 25 09:49:37 crc kubenswrapper[4932]: I1125 09:49:37.726070 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:49:37 crc kubenswrapper[4932]: E1125 09:49:37.726332 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:49:50 crc kubenswrapper[4932]: I1125 09:49:50.610881 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:49:50 crc kubenswrapper[4932]: E1125 09:49:50.611901 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.143242 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lgcqj"] Nov 25 09:50:01 crc kubenswrapper[4932]: E1125 09:50:01.144351 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" containerName="extract-utilities" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.144369 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" containerName="extract-utilities" Nov 25 09:50:01 crc kubenswrapper[4932]: E1125 09:50:01.144401 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" containerName="extract-content" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.144408 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" containerName="extract-content" Nov 25 09:50:01 crc kubenswrapper[4932]: E1125 09:50:01.144418 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" containerName="registry-server" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.144426 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" containerName="registry-server" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.144626 4932 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="b6d9ae90-3cf2-49ae-9a8d-639a4ef6c52b" containerName="registry-server" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.145756 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.161756 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lgcqj"] Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.330793 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9a4c0e-58e6-4895-99e8-467806c345d0-catalog-content\") pod \"redhat-marketplace-lgcqj\" (UID: \"3d9a4c0e-58e6-4895-99e8-467806c345d0\") " pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.330887 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2pf9\" (UniqueName: \"kubernetes.io/projected/3d9a4c0e-58e6-4895-99e8-467806c345d0-kube-api-access-v2pf9\") pod \"redhat-marketplace-lgcqj\" (UID: \"3d9a4c0e-58e6-4895-99e8-467806c345d0\") " pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.330911 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9a4c0e-58e6-4895-99e8-467806c345d0-utilities\") pod \"redhat-marketplace-lgcqj\" (UID: \"3d9a4c0e-58e6-4895-99e8-467806c345d0\") " pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.432124 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2pf9\" (UniqueName: \"kubernetes.io/projected/3d9a4c0e-58e6-4895-99e8-467806c345d0-kube-api-access-v2pf9\") pod \"redhat-marketplace-lgcqj\" (UID: \"3d9a4c0e-58e6-4895-99e8-467806c345d0\") " pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.432177 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9a4c0e-58e6-4895-99e8-467806c345d0-utilities\") pod \"redhat-marketplace-lgcqj\" (UID: \"3d9a4c0e-58e6-4895-99e8-467806c345d0\") " pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.432305 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9a4c0e-58e6-4895-99e8-467806c345d0-catalog-content\") pod \"redhat-marketplace-lgcqj\" (UID: \"3d9a4c0e-58e6-4895-99e8-467806c345d0\") " pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.432759 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9a4c0e-58e6-4895-99e8-467806c345d0-catalog-content\") pod \"redhat-marketplace-lgcqj\" (UID: \"3d9a4c0e-58e6-4895-99e8-467806c345d0\") " pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.432985 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9a4c0e-58e6-4895-99e8-467806c345d0-utilities\") pod \"redhat-marketplace-lgcqj\" (UID: 
\"3d9a4c0e-58e6-4895-99e8-467806c345d0\") " pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.463386 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2pf9\" (UniqueName: \"kubernetes.io/projected/3d9a4c0e-58e6-4895-99e8-467806c345d0-kube-api-access-v2pf9\") pod \"redhat-marketplace-lgcqj\" (UID: \"3d9a4c0e-58e6-4895-99e8-467806c345d0\") " pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.469903 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:01 crc kubenswrapper[4932]: I1125 09:50:01.932441 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lgcqj"] Nov 25 09:50:02 crc kubenswrapper[4932]: I1125 09:50:02.929757 4932 generic.go:334] "Generic (PLEG): container finished" podID="3d9a4c0e-58e6-4895-99e8-467806c345d0" containerID="d4c78680a40a85434e6b36219b5a62169453394ac89fdd176b09aa20f3bd9362" exitCode=0 Nov 25 09:50:02 crc kubenswrapper[4932]: I1125 09:50:02.929836 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lgcqj" event={"ID":"3d9a4c0e-58e6-4895-99e8-467806c345d0","Type":"ContainerDied","Data":"d4c78680a40a85434e6b36219b5a62169453394ac89fdd176b09aa20f3bd9362"} Nov 25 09:50:02 crc kubenswrapper[4932]: I1125 09:50:02.930246 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lgcqj" event={"ID":"3d9a4c0e-58e6-4895-99e8-467806c345d0","Type":"ContainerStarted","Data":"eb6e5d63bb822ca78719681a61f8ee65812653b1a2c23cd60379bc6b2d611c07"} Nov 25 09:50:03 crc kubenswrapper[4932]: I1125 09:50:03.941079 4932 generic.go:334] "Generic (PLEG): container finished" podID="3d9a4c0e-58e6-4895-99e8-467806c345d0" containerID="a8a08178e6d9a5065d4e958880ca4e1ffea5aac1a2d1c264a7efb4436219c6e1" exitCode=0 Nov 25 09:50:03 crc kubenswrapper[4932]: I1125 09:50:03.941171 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lgcqj" event={"ID":"3d9a4c0e-58e6-4895-99e8-467806c345d0","Type":"ContainerDied","Data":"a8a08178e6d9a5065d4e958880ca4e1ffea5aac1a2d1c264a7efb4436219c6e1"} Nov 25 09:50:04 crc kubenswrapper[4932]: I1125 09:50:04.955976 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lgcqj" event={"ID":"3d9a4c0e-58e6-4895-99e8-467806c345d0","Type":"ContainerStarted","Data":"38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385"} Nov 25 09:50:04 crc kubenswrapper[4932]: I1125 09:50:04.975472 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lgcqj" podStartSLOduration=2.596231206 podStartE2EDuration="3.975450452s" podCreationTimestamp="2025-11-25 09:50:01 +0000 UTC" firstStartedPulling="2025-11-25 09:50:02.931561204 +0000 UTC m=+3663.057590777" lastFinishedPulling="2025-11-25 09:50:04.31078046 +0000 UTC m=+3664.436810023" observedRunningTime="2025-11-25 09:50:04.97290067 +0000 UTC m=+3665.098930233" watchObservedRunningTime="2025-11-25 09:50:04.975450452 +0000 UTC m=+3665.101480045" Nov 25 09:50:05 crc kubenswrapper[4932]: I1125 09:50:05.605860 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:50:05 crc kubenswrapper[4932]: E1125 09:50:05.606107 4932 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.125722 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mm6hg"] Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.128377 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mm6hg" Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.147130 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mm6hg"] Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.246758 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2658acaa-bb88-458e-a7b6-8d57a4d54936-catalog-content\") pod \"redhat-operators-mm6hg\" (UID: \"2658acaa-bb88-458e-a7b6-8d57a4d54936\") " pod="openshift-marketplace/redhat-operators-mm6hg" Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.247066 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbd9t\" (UniqueName: \"kubernetes.io/projected/2658acaa-bb88-458e-a7b6-8d57a4d54936-kube-api-access-jbd9t\") pod \"redhat-operators-mm6hg\" (UID: \"2658acaa-bb88-458e-a7b6-8d57a4d54936\") " pod="openshift-marketplace/redhat-operators-mm6hg" Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.247163 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2658acaa-bb88-458e-a7b6-8d57a4d54936-utilities\") pod \"redhat-operators-mm6hg\" (UID: \"2658acaa-bb88-458e-a7b6-8d57a4d54936\") " pod="openshift-marketplace/redhat-operators-mm6hg" Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.349270 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2658acaa-bb88-458e-a7b6-8d57a4d54936-catalog-content\") pod \"redhat-operators-mm6hg\" (UID: \"2658acaa-bb88-458e-a7b6-8d57a4d54936\") " pod="openshift-marketplace/redhat-operators-mm6hg" Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.349391 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbd9t\" (UniqueName: \"kubernetes.io/projected/2658acaa-bb88-458e-a7b6-8d57a4d54936-kube-api-access-jbd9t\") pod \"redhat-operators-mm6hg\" (UID: \"2658acaa-bb88-458e-a7b6-8d57a4d54936\") " pod="openshift-marketplace/redhat-operators-mm6hg" Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.349423 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2658acaa-bb88-458e-a7b6-8d57a4d54936-utilities\") pod \"redhat-operators-mm6hg\" (UID: \"2658acaa-bb88-458e-a7b6-8d57a4d54936\") " pod="openshift-marketplace/redhat-operators-mm6hg" Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.349974 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/2658acaa-bb88-458e-a7b6-8d57a4d54936-catalog-content\") pod \"redhat-operators-mm6hg\" (UID: \"2658acaa-bb88-458e-a7b6-8d57a4d54936\") " pod="openshift-marketplace/redhat-operators-mm6hg" Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.350219 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2658acaa-bb88-458e-a7b6-8d57a4d54936-utilities\") pod \"redhat-operators-mm6hg\" (UID: \"2658acaa-bb88-458e-a7b6-8d57a4d54936\") " pod="openshift-marketplace/redhat-operators-mm6hg" Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.370326 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbd9t\" (UniqueName: \"kubernetes.io/projected/2658acaa-bb88-458e-a7b6-8d57a4d54936-kube-api-access-jbd9t\") pod \"redhat-operators-mm6hg\" (UID: \"2658acaa-bb88-458e-a7b6-8d57a4d54936\") " pod="openshift-marketplace/redhat-operators-mm6hg" Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.453174 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mm6hg" Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.929409 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mm6hg"] Nov 25 09:50:08 crc kubenswrapper[4932]: W1125 09:50:08.944463 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2658acaa_bb88_458e_a7b6_8d57a4d54936.slice/crio-e1fb14f2e05d6c4e124e8284ce73bfc0a776c7c30832aab33b151f0f82cc73bf WatchSource:0}: Error finding container e1fb14f2e05d6c4e124e8284ce73bfc0a776c7c30832aab33b151f0f82cc73bf: Status 404 returned error can't find the container with id e1fb14f2e05d6c4e124e8284ce73bfc0a776c7c30832aab33b151f0f82cc73bf Nov 25 09:50:08 crc kubenswrapper[4932]: I1125 09:50:08.989835 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6hg" event={"ID":"2658acaa-bb88-458e-a7b6-8d57a4d54936","Type":"ContainerStarted","Data":"e1fb14f2e05d6c4e124e8284ce73bfc0a776c7c30832aab33b151f0f82cc73bf"} Nov 25 09:50:10 crc kubenswrapper[4932]: I1125 09:50:10.001939 4932 generic.go:334] "Generic (PLEG): container finished" podID="2658acaa-bb88-458e-a7b6-8d57a4d54936" containerID="126064929b2b03f7623cfa5af4b382c778a5fd4aaa33f93f69177b6c6c01800e" exitCode=0 Nov 25 09:50:10 crc kubenswrapper[4932]: I1125 09:50:10.002011 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6hg" event={"ID":"2658acaa-bb88-458e-a7b6-8d57a4d54936","Type":"ContainerDied","Data":"126064929b2b03f7623cfa5af4b382c778a5fd4aaa33f93f69177b6c6c01800e"} Nov 25 09:50:11 crc kubenswrapper[4932]: I1125 09:50:11.011125 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6hg" event={"ID":"2658acaa-bb88-458e-a7b6-8d57a4d54936","Type":"ContainerStarted","Data":"b4907317f8f8f38c4bb29998f13d39c589fb35df3e8ae3b8dfe0d71151d4d388"} Nov 25 09:50:11 crc kubenswrapper[4932]: I1125 09:50:11.471178 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:11 crc kubenswrapper[4932]: I1125 09:50:11.471564 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:11 crc kubenswrapper[4932]: I1125 09:50:11.521030 4932 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:12 crc kubenswrapper[4932]: I1125 09:50:12.024676 4932 generic.go:334] "Generic (PLEG): container finished" podID="2658acaa-bb88-458e-a7b6-8d57a4d54936" containerID="b4907317f8f8f38c4bb29998f13d39c589fb35df3e8ae3b8dfe0d71151d4d388" exitCode=0 Nov 25 09:50:12 crc kubenswrapper[4932]: I1125 09:50:12.024872 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6hg" event={"ID":"2658acaa-bb88-458e-a7b6-8d57a4d54936","Type":"ContainerDied","Data":"b4907317f8f8f38c4bb29998f13d39c589fb35df3e8ae3b8dfe0d71151d4d388"} Nov 25 09:50:12 crc kubenswrapper[4932]: I1125 09:50:12.070660 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lgcqj" Nov 25 09:50:13 crc kubenswrapper[4932]: I1125 09:50:13.034617 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6hg" event={"ID":"2658acaa-bb88-458e-a7b6-8d57a4d54936","Type":"ContainerStarted","Data":"f3ad133e64067439cfbc78f79a9c14dca5b205207cd7ae662c301f961afbf496"} Nov 25 09:50:13 crc kubenswrapper[4932]: I1125 09:50:13.913504 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mm6hg" podStartSLOduration=3.454690196 podStartE2EDuration="5.913484807s" podCreationTimestamp="2025-11-25 09:50:08 +0000 UTC" firstStartedPulling="2025-11-25 09:50:10.00449617 +0000 UTC m=+3670.130525733" lastFinishedPulling="2025-11-25 09:50:12.463290781 +0000 UTC m=+3672.589320344" observedRunningTime="2025-11-25 09:50:13.058661452 +0000 UTC m=+3673.184691015" watchObservedRunningTime="2025-11-25 09:50:13.913484807 +0000 UTC m=+3674.039514370" Nov 25 09:50:13 crc kubenswrapper[4932]: I1125 09:50:13.916182 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lgcqj"] Nov 25 09:50:14 crc kubenswrapper[4932]: I1125 09:50:14.042377 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lgcqj" podUID="3d9a4c0e-58e6-4895-99e8-467806c345d0" containerName="registry-server" containerID="cri-o://38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385" gracePeriod=2 Nov 25 09:50:14 crc kubenswrapper[4932]: I1125 09:50:14.414262 4932 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 09:50:14 crc kubenswrapper[4932]: I1125 09:50:14.550770 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9a4c0e-58e6-4895-99e8-467806c345d0-utilities\") pod \"3d9a4c0e-58e6-4895-99e8-467806c345d0\" (UID: \"3d9a4c0e-58e6-4895-99e8-467806c345d0\") "
Nov 25 09:50:14 crc kubenswrapper[4932]: I1125 09:50:14.550962 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2pf9\" (UniqueName: \"kubernetes.io/projected/3d9a4c0e-58e6-4895-99e8-467806c345d0-kube-api-access-v2pf9\") pod \"3d9a4c0e-58e6-4895-99e8-467806c345d0\" (UID: \"3d9a4c0e-58e6-4895-99e8-467806c345d0\") "
Nov 25 09:50:14 crc kubenswrapper[4932]: I1125 09:50:14.551020 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9a4c0e-58e6-4895-99e8-467806c345d0-catalog-content\") pod \"3d9a4c0e-58e6-4895-99e8-467806c345d0\" (UID: \"3d9a4c0e-58e6-4895-99e8-467806c345d0\") "
Nov 25 09:50:14 crc kubenswrapper[4932]: I1125 09:50:14.551543 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d9a4c0e-58e6-4895-99e8-467806c345d0-utilities" (OuterVolumeSpecName: "utilities") pod "3d9a4c0e-58e6-4895-99e8-467806c345d0" (UID: "3d9a4c0e-58e6-4895-99e8-467806c345d0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:50:14 crc kubenswrapper[4932]: I1125 09:50:14.556477 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d9a4c0e-58e6-4895-99e8-467806c345d0-kube-api-access-v2pf9" (OuterVolumeSpecName: "kube-api-access-v2pf9") pod "3d9a4c0e-58e6-4895-99e8-467806c345d0" (UID: "3d9a4c0e-58e6-4895-99e8-467806c345d0"). InnerVolumeSpecName "kube-api-access-v2pf9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:50:14 crc kubenswrapper[4932]: I1125 09:50:14.570658 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d9a4c0e-58e6-4895-99e8-467806c345d0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3d9a4c0e-58e6-4895-99e8-467806c345d0" (UID: "3d9a4c0e-58e6-4895-99e8-467806c345d0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:50:14 crc kubenswrapper[4932]: I1125 09:50:14.653770 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2pf9\" (UniqueName: \"kubernetes.io/projected/3d9a4c0e-58e6-4895-99e8-467806c345d0-kube-api-access-v2pf9\") on node \"crc\" DevicePath \"\""
Nov 25 09:50:14 crc kubenswrapper[4932]: I1125 09:50:14.653987 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d9a4c0e-58e6-4895-99e8-467806c345d0-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 09:50:14 crc kubenswrapper[4932]: I1125 09:50:14.654125 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d9a4c0e-58e6-4895-99e8-467806c345d0-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.053932 4932 generic.go:334] "Generic (PLEG): container finished" podID="3d9a4c0e-58e6-4895-99e8-467806c345d0" containerID="38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385" exitCode=0
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.054005 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lgcqj" event={"ID":"3d9a4c0e-58e6-4895-99e8-467806c345d0","Type":"ContainerDied","Data":"38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385"}
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.054041 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lgcqj" event={"ID":"3d9a4c0e-58e6-4895-99e8-467806c345d0","Type":"ContainerDied","Data":"eb6e5d63bb822ca78719681a61f8ee65812653b1a2c23cd60379bc6b2d611c07"}
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.054066 4932 scope.go:117] "RemoveContainer" containerID="38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385"
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.054008 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lgcqj"
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.082400 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lgcqj"]
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.088769 4932 scope.go:117] "RemoveContainer" containerID="a8a08178e6d9a5065d4e958880ca4e1ffea5aac1a2d1c264a7efb4436219c6e1"
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.089420 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lgcqj"]
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.111604 4932 scope.go:117] "RemoveContainer" containerID="d4c78680a40a85434e6b36219b5a62169453394ac89fdd176b09aa20f3bd9362"
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.147999 4932 scope.go:117] "RemoveContainer" containerID="38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385"
Nov 25 09:50:15 crc kubenswrapper[4932]: E1125 09:50:15.149227 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385\": container with ID starting with 38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385 not found: ID does not exist" containerID="38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385"
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.149271 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385"} err="failed to get container status \"38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385\": rpc error: code = NotFound desc = could not find container \"38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385\": container with ID starting with 38951f7432a6439432f8addf0e5e279756e48805a570bfac8e53c1aa713cf385 not found: ID does not exist"
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.149299 4932 scope.go:117] "RemoveContainer" containerID="a8a08178e6d9a5065d4e958880ca4e1ffea5aac1a2d1c264a7efb4436219c6e1"
Nov 25 09:50:15 crc kubenswrapper[4932]: E1125 09:50:15.149934 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8a08178e6d9a5065d4e958880ca4e1ffea5aac1a2d1c264a7efb4436219c6e1\": container with ID starting with a8a08178e6d9a5065d4e958880ca4e1ffea5aac1a2d1c264a7efb4436219c6e1 not found: ID does not exist" containerID="a8a08178e6d9a5065d4e958880ca4e1ffea5aac1a2d1c264a7efb4436219c6e1"
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.149986 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8a08178e6d9a5065d4e958880ca4e1ffea5aac1a2d1c264a7efb4436219c6e1"} err="failed to get container status \"a8a08178e6d9a5065d4e958880ca4e1ffea5aac1a2d1c264a7efb4436219c6e1\": rpc error: code = NotFound desc = could not find container \"a8a08178e6d9a5065d4e958880ca4e1ffea5aac1a2d1c264a7efb4436219c6e1\": container with ID starting with a8a08178e6d9a5065d4e958880ca4e1ffea5aac1a2d1c264a7efb4436219c6e1 not found: ID does not exist"
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.150022 4932 scope.go:117] "RemoveContainer" containerID="d4c78680a40a85434e6b36219b5a62169453394ac89fdd176b09aa20f3bd9362"
Nov 25 09:50:15 crc kubenswrapper[4932]: E1125 09:50:15.150438 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4c78680a40a85434e6b36219b5a62169453394ac89fdd176b09aa20f3bd9362\": container with ID starting with d4c78680a40a85434e6b36219b5a62169453394ac89fdd176b09aa20f3bd9362 not found: ID does not exist" containerID="d4c78680a40a85434e6b36219b5a62169453394ac89fdd176b09aa20f3bd9362"
Nov 25 09:50:15 crc kubenswrapper[4932]: I1125 09:50:15.150472 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4c78680a40a85434e6b36219b5a62169453394ac89fdd176b09aa20f3bd9362"} err="failed to get container status \"d4c78680a40a85434e6b36219b5a62169453394ac89fdd176b09aa20f3bd9362\": rpc error: code = NotFound desc = could not find container \"d4c78680a40a85434e6b36219b5a62169453394ac89fdd176b09aa20f3bd9362\": container with ID starting with d4c78680a40a85434e6b36219b5a62169453394ac89fdd176b09aa20f3bd9362 not found: ID does not exist"
Nov 25 09:50:16 crc kubenswrapper[4932]: I1125 09:50:16.616557 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d9a4c0e-58e6-4895-99e8-467806c345d0" path="/var/lib/kubelet/pods/3d9a4c0e-58e6-4895-99e8-467806c345d0/volumes"
Nov 25 09:50:18 crc kubenswrapper[4932]: I1125 09:50:18.453405 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mm6hg"
Nov 25 09:50:18 crc kubenswrapper[4932]: I1125 09:50:18.453964 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mm6hg"
Nov 25 09:50:18 crc kubenswrapper[4932]: I1125 09:50:18.510064 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mm6hg"
Nov 25 09:50:19 crc kubenswrapper[4932]: I1125 09:50:19.125529 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mm6hg"
Nov 25 09:50:19 crc kubenswrapper[4932]: I1125 09:50:19.316027 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mm6hg"]
Nov 25 09:50:20 crc kubenswrapper[4932]: I1125 09:50:20.614996 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa"
Nov 25 09:50:20 crc kubenswrapper[4932]: E1125 09:50:20.615550 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:50:21 crc kubenswrapper[4932]: I1125 09:50:21.107426 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mm6hg" podUID="2658acaa-bb88-458e-a7b6-8d57a4d54936" containerName="registry-server" containerID="cri-o://f3ad133e64067439cfbc78f79a9c14dca5b205207cd7ae662c301f961afbf496" gracePeriod=2
Nov 25 09:50:23 crc kubenswrapper[4932]: I1125 09:50:23.128541 4932 generic.go:334] "Generic (PLEG): container finished" podID="2658acaa-bb88-458e-a7b6-8d57a4d54936" containerID="f3ad133e64067439cfbc78f79a9c14dca5b205207cd7ae662c301f961afbf496" exitCode=0
Nov 25 09:50:23 crc kubenswrapper[4932]: I1125 09:50:23.128618 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6hg" event={"ID":"2658acaa-bb88-458e-a7b6-8d57a4d54936","Type":"ContainerDied","Data":"f3ad133e64067439cfbc78f79a9c14dca5b205207cd7ae662c301f961afbf496"}
Nov 25 09:50:23 crc kubenswrapper[4932]: I1125 09:50:23.503149 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mm6hg"
Nov 25 09:50:23 crc kubenswrapper[4932]: I1125 09:50:23.693251 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2658acaa-bb88-458e-a7b6-8d57a4d54936-utilities\") pod \"2658acaa-bb88-458e-a7b6-8d57a4d54936\" (UID: \"2658acaa-bb88-458e-a7b6-8d57a4d54936\") "
Nov 25 09:50:23 crc kubenswrapper[4932]: I1125 09:50:23.693389 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbd9t\" (UniqueName: \"kubernetes.io/projected/2658acaa-bb88-458e-a7b6-8d57a4d54936-kube-api-access-jbd9t\") pod \"2658acaa-bb88-458e-a7b6-8d57a4d54936\" (UID: \"2658acaa-bb88-458e-a7b6-8d57a4d54936\") "
Nov 25 09:50:23 crc kubenswrapper[4932]: I1125 09:50:23.693430 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2658acaa-bb88-458e-a7b6-8d57a4d54936-catalog-content\") pod \"2658acaa-bb88-458e-a7b6-8d57a4d54936\" (UID: \"2658acaa-bb88-458e-a7b6-8d57a4d54936\") "
Nov 25 09:50:23 crc kubenswrapper[4932]: I1125 09:50:23.694680 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2658acaa-bb88-458e-a7b6-8d57a4d54936-utilities" (OuterVolumeSpecName: "utilities") pod "2658acaa-bb88-458e-a7b6-8d57a4d54936" (UID: "2658acaa-bb88-458e-a7b6-8d57a4d54936"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:50:23 crc kubenswrapper[4932]: I1125 09:50:23.702483 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2658acaa-bb88-458e-a7b6-8d57a4d54936-kube-api-access-jbd9t" (OuterVolumeSpecName: "kube-api-access-jbd9t") pod "2658acaa-bb88-458e-a7b6-8d57a4d54936" (UID: "2658acaa-bb88-458e-a7b6-8d57a4d54936"). InnerVolumeSpecName "kube-api-access-jbd9t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 09:50:23 crc kubenswrapper[4932]: I1125 09:50:23.795595 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2658acaa-bb88-458e-a7b6-8d57a4d54936-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 09:50:23 crc kubenswrapper[4932]: I1125 09:50:23.795633 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbd9t\" (UniqueName: \"kubernetes.io/projected/2658acaa-bb88-458e-a7b6-8d57a4d54936-kube-api-access-jbd9t\") on node \"crc\" DevicePath \"\""
Nov 25 09:50:23 crc kubenswrapper[4932]: I1125 09:50:23.803466 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2658acaa-bb88-458e-a7b6-8d57a4d54936-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2658acaa-bb88-458e-a7b6-8d57a4d54936" (UID: "2658acaa-bb88-458e-a7b6-8d57a4d54936"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 09:50:23 crc kubenswrapper[4932]: I1125 09:50:23.897579 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2658acaa-bb88-458e-a7b6-8d57a4d54936-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 09:50:24 crc kubenswrapper[4932]: I1125 09:50:24.137166 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mm6hg" event={"ID":"2658acaa-bb88-458e-a7b6-8d57a4d54936","Type":"ContainerDied","Data":"e1fb14f2e05d6c4e124e8284ce73bfc0a776c7c30832aab33b151f0f82cc73bf"}
Nov 25 09:50:24 crc kubenswrapper[4932]: I1125 09:50:24.138247 4932 scope.go:117] "RemoveContainer" containerID="f3ad133e64067439cfbc78f79a9c14dca5b205207cd7ae662c301f961afbf496"
Nov 25 09:50:24 crc kubenswrapper[4932]: I1125 09:50:24.137245 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mm6hg"
Nov 25 09:50:24 crc kubenswrapper[4932]: I1125 09:50:24.159376 4932 scope.go:117] "RemoveContainer" containerID="b4907317f8f8f38c4bb29998f13d39c589fb35df3e8ae3b8dfe0d71151d4d388"
Nov 25 09:50:24 crc kubenswrapper[4932]: I1125 09:50:24.186733 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mm6hg"]
Nov 25 09:50:24 crc kubenswrapper[4932]: I1125 09:50:24.193125 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mm6hg"]
Nov 25 09:50:24 crc kubenswrapper[4932]: I1125 09:50:24.208342 4932 scope.go:117] "RemoveContainer" containerID="126064929b2b03f7623cfa5af4b382c778a5fd4aaa33f93f69177b6c6c01800e"
Nov 25 09:50:24 crc kubenswrapper[4932]: I1125 09:50:24.626731 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2658acaa-bb88-458e-a7b6-8d57a4d54936" path="/var/lib/kubelet/pods/2658acaa-bb88-458e-a7b6-8d57a4d54936/volumes"
Nov 25 09:50:32 crc kubenswrapper[4932]: I1125 09:50:32.607047 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa"
Nov 25 09:50:32 crc kubenswrapper[4932]: E1125 09:50:32.608531 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:50:43 crc kubenswrapper[4932]: I1125 09:50:43.606253 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa"
Nov 25 09:50:43 crc kubenswrapper[4932]: E1125 09:50:43.607036 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:50:56 crc kubenswrapper[4932]: I1125 09:50:56.606515 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa"
Nov 25 09:50:56 crc kubenswrapper[4932]: E1125 09:50:56.607663 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:51:08 crc kubenswrapper[4932]: I1125 09:51:08.606654 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa"
Nov 25 09:51:08 crc kubenswrapper[4932]: E1125 09:51:08.607443 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:51:20 crc kubenswrapper[4932]: I1125 09:51:20.610342 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa"
Nov 25 09:51:20 crc kubenswrapper[4932]: E1125 09:51:20.611244 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.605505 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa"
Nov 25 09:51:35 crc kubenswrapper[4932]: E1125 09:51:35.606312 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.818339 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dmc4g"]
Nov 25 09:51:35 crc kubenswrapper[4932]: E1125 09:51:35.818649 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2658acaa-bb88-458e-a7b6-8d57a4d54936" containerName="extract-utilities"
Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.818661 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="2658acaa-bb88-458e-a7b6-8d57a4d54936" containerName="extract-utilities"
Nov 25 09:51:35 crc kubenswrapper[4932]: E1125 09:51:35.818687 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9a4c0e-58e6-4895-99e8-467806c345d0" containerName="extract-content"
Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.818695 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9a4c0e-58e6-4895-99e8-467806c345d0" containerName="extract-content"
Nov 25 09:51:35 crc kubenswrapper[4932]: E1125 09:51:35.818716 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2658acaa-bb88-458e-a7b6-8d57a4d54936" containerName="registry-server"
Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.818722 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="2658acaa-bb88-458e-a7b6-8d57a4d54936" containerName="registry-server"
Nov 25 09:51:35 crc kubenswrapper[4932]: E1125 09:51:35.818733 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2658acaa-bb88-458e-a7b6-8d57a4d54936" containerName="extract-content"
Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.818739 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="2658acaa-bb88-458e-a7b6-8d57a4d54936" containerName="extract-content"
Nov 25 09:51:35 crc kubenswrapper[4932]: E1125 09:51:35.818746 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9a4c0e-58e6-4895-99e8-467806c345d0" containerName="extract-utilities"
Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.818753 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9a4c0e-58e6-4895-99e8-467806c345d0" containerName="extract-utilities"
Nov 25 09:51:35 crc kubenswrapper[4932]: E1125 09:51:35.818764 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9a4c0e-58e6-4895-99e8-467806c345d0" containerName="registry-server"
Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.818769 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9a4c0e-58e6-4895-99e8-467806c345d0" containerName="registry-server"
Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.818911 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="2658acaa-bb88-458e-a7b6-8d57a4d54936" containerName="registry-server"
Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.818920 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d9a4c0e-58e6-4895-99e8-467806c345d0" containerName="registry-server"
Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.819904 4932 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.907861 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dmc4g"] Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.999149 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f76439b-c387-4bf3-896a-15574e4ed843-catalog-content\") pod \"certified-operators-dmc4g\" (UID: \"5f76439b-c387-4bf3-896a-15574e4ed843\") " pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.999218 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f76439b-c387-4bf3-896a-15574e4ed843-utilities\") pod \"certified-operators-dmc4g\" (UID: \"5f76439b-c387-4bf3-896a-15574e4ed843\") " pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:35 crc kubenswrapper[4932]: I1125 09:51:35.999285 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75x5t\" (UniqueName: \"kubernetes.io/projected/5f76439b-c387-4bf3-896a-15574e4ed843-kube-api-access-75x5t\") pod \"certified-operators-dmc4g\" (UID: \"5f76439b-c387-4bf3-896a-15574e4ed843\") " pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:36 crc kubenswrapper[4932]: I1125 09:51:36.100469 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75x5t\" (UniqueName: \"kubernetes.io/projected/5f76439b-c387-4bf3-896a-15574e4ed843-kube-api-access-75x5t\") pod \"certified-operators-dmc4g\" (UID: \"5f76439b-c387-4bf3-896a-15574e4ed843\") " pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:36 crc kubenswrapper[4932]: I1125 09:51:36.100574 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f76439b-c387-4bf3-896a-15574e4ed843-catalog-content\") pod \"certified-operators-dmc4g\" (UID: \"5f76439b-c387-4bf3-896a-15574e4ed843\") " pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:36 crc kubenswrapper[4932]: I1125 09:51:36.100595 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f76439b-c387-4bf3-896a-15574e4ed843-utilities\") pod \"certified-operators-dmc4g\" (UID: \"5f76439b-c387-4bf3-896a-15574e4ed843\") " pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:36 crc kubenswrapper[4932]: I1125 09:51:36.101055 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f76439b-c387-4bf3-896a-15574e4ed843-utilities\") pod \"certified-operators-dmc4g\" (UID: \"5f76439b-c387-4bf3-896a-15574e4ed843\") " pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:36 crc kubenswrapper[4932]: I1125 09:51:36.101828 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f76439b-c387-4bf3-896a-15574e4ed843-catalog-content\") pod \"certified-operators-dmc4g\" (UID: \"5f76439b-c387-4bf3-896a-15574e4ed843\") " pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:36 crc kubenswrapper[4932]: I1125 09:51:36.136361 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-75x5t\" (UniqueName: \"kubernetes.io/projected/5f76439b-c387-4bf3-896a-15574e4ed843-kube-api-access-75x5t\") pod \"certified-operators-dmc4g\" (UID: \"5f76439b-c387-4bf3-896a-15574e4ed843\") " pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:36 crc kubenswrapper[4932]: I1125 09:51:36.140932 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:36 crc kubenswrapper[4932]: I1125 09:51:36.464486 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dmc4g"] Nov 25 09:51:36 crc kubenswrapper[4932]: I1125 09:51:36.713576 4932 generic.go:334] "Generic (PLEG): container finished" podID="5f76439b-c387-4bf3-896a-15574e4ed843" containerID="64fc3a9bd9d8a725b95255369b0ffa7946b811a10dea0340495540b04901b2e9" exitCode=0 Nov 25 09:51:36 crc kubenswrapper[4932]: I1125 09:51:36.713621 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmc4g" event={"ID":"5f76439b-c387-4bf3-896a-15574e4ed843","Type":"ContainerDied","Data":"64fc3a9bd9d8a725b95255369b0ffa7946b811a10dea0340495540b04901b2e9"} Nov 25 09:51:36 crc kubenswrapper[4932]: I1125 09:51:36.713646 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmc4g" event={"ID":"5f76439b-c387-4bf3-896a-15574e4ed843","Type":"ContainerStarted","Data":"74f026deefe1c9cef9cdd2f07c78c9b4c32a099f12595acb2b9c6316d3ac4b42"} Nov 25 09:51:37 crc kubenswrapper[4932]: I1125 09:51:37.721920 4932 generic.go:334] "Generic (PLEG): container finished" podID="5f76439b-c387-4bf3-896a-15574e4ed843" containerID="a1c82220fc4a18405e20e4d0e75cb590f3c45482fd5136ee8b02ac90d65c570f" exitCode=0 Nov 25 09:51:37 crc kubenswrapper[4932]: I1125 09:51:37.722032 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmc4g" event={"ID":"5f76439b-c387-4bf3-896a-15574e4ed843","Type":"ContainerDied","Data":"a1c82220fc4a18405e20e4d0e75cb590f3c45482fd5136ee8b02ac90d65c570f"} Nov 25 09:51:38 crc kubenswrapper[4932]: I1125 09:51:38.731407 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmc4g" event={"ID":"5f76439b-c387-4bf3-896a-15574e4ed843","Type":"ContainerStarted","Data":"7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1"} Nov 25 09:51:38 crc kubenswrapper[4932]: I1125 09:51:38.799260 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dmc4g" podStartSLOduration=2.390578893 podStartE2EDuration="3.799235823s" podCreationTimestamp="2025-11-25 09:51:35 +0000 UTC" firstStartedPulling="2025-11-25 09:51:36.71494264 +0000 UTC m=+3756.840972203" lastFinishedPulling="2025-11-25 09:51:38.12359957 +0000 UTC m=+3758.249629133" observedRunningTime="2025-11-25 09:51:38.794930301 +0000 UTC m=+3758.920959874" watchObservedRunningTime="2025-11-25 09:51:38.799235823 +0000 UTC m=+3758.925265386" Nov 25 09:51:46 crc kubenswrapper[4932]: I1125 09:51:46.141910 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:46 crc kubenswrapper[4932]: I1125 09:51:46.142557 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:46 crc kubenswrapper[4932]: I1125 09:51:46.191492 4932 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:46 crc kubenswrapper[4932]: I1125 09:51:46.836624 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:46 crc kubenswrapper[4932]: I1125 09:51:46.880361 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dmc4g"] Nov 25 09:51:48 crc kubenswrapper[4932]: I1125 09:51:48.605692 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:51:48 crc kubenswrapper[4932]: E1125 09:51:48.607218 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:51:48 crc kubenswrapper[4932]: I1125 09:51:48.801246 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dmc4g" podUID="5f76439b-c387-4bf3-896a-15574e4ed843" containerName="registry-server" containerID="cri-o://7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1" gracePeriod=2 Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.205805 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.309534 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f76439b-c387-4bf3-896a-15574e4ed843-catalog-content\") pod \"5f76439b-c387-4bf3-896a-15574e4ed843\" (UID: \"5f76439b-c387-4bf3-896a-15574e4ed843\") " Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.309619 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f76439b-c387-4bf3-896a-15574e4ed843-utilities\") pod \"5f76439b-c387-4bf3-896a-15574e4ed843\" (UID: \"5f76439b-c387-4bf3-896a-15574e4ed843\") " Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.309699 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75x5t\" (UniqueName: \"kubernetes.io/projected/5f76439b-c387-4bf3-896a-15574e4ed843-kube-api-access-75x5t\") pod \"5f76439b-c387-4bf3-896a-15574e4ed843\" (UID: \"5f76439b-c387-4bf3-896a-15574e4ed843\") " Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.310555 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f76439b-c387-4bf3-896a-15574e4ed843-utilities" (OuterVolumeSpecName: "utilities") pod "5f76439b-c387-4bf3-896a-15574e4ed843" (UID: "5f76439b-c387-4bf3-896a-15574e4ed843"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.315111 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f76439b-c387-4bf3-896a-15574e4ed843-kube-api-access-75x5t" (OuterVolumeSpecName: "kube-api-access-75x5t") pod "5f76439b-c387-4bf3-896a-15574e4ed843" (UID: "5f76439b-c387-4bf3-896a-15574e4ed843"). InnerVolumeSpecName "kube-api-access-75x5t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.356596 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f76439b-c387-4bf3-896a-15574e4ed843-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5f76439b-c387-4bf3-896a-15574e4ed843" (UID: "5f76439b-c387-4bf3-896a-15574e4ed843"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.411184 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f76439b-c387-4bf3-896a-15574e4ed843-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.411567 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f76439b-c387-4bf3-896a-15574e4ed843-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.411690 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75x5t\" (UniqueName: \"kubernetes.io/projected/5f76439b-c387-4bf3-896a-15574e4ed843-kube-api-access-75x5t\") on node \"crc\" DevicePath \"\"" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.810963 4932 generic.go:334] "Generic (PLEG): container finished" podID="5f76439b-c387-4bf3-896a-15574e4ed843" containerID="7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1" exitCode=0 Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.811026 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmc4g" event={"ID":"5f76439b-c387-4bf3-896a-15574e4ed843","Type":"ContainerDied","Data":"7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1"} Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.811056 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dmc4g" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.811085 4932 scope.go:117] "RemoveContainer" containerID="7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.811068 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dmc4g" event={"ID":"5f76439b-c387-4bf3-896a-15574e4ed843","Type":"ContainerDied","Data":"74f026deefe1c9cef9cdd2f07c78c9b4c32a099f12595acb2b9c6316d3ac4b42"} Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.832753 4932 scope.go:117] "RemoveContainer" containerID="a1c82220fc4a18405e20e4d0e75cb590f3c45482fd5136ee8b02ac90d65c570f" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.866285 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dmc4g"] Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.876292 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dmc4g"] Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.877256 4932 scope.go:117] "RemoveContainer" containerID="64fc3a9bd9d8a725b95255369b0ffa7946b811a10dea0340495540b04901b2e9" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.907491 4932 scope.go:117] "RemoveContainer" containerID="7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1" Nov 25 09:51:49 crc kubenswrapper[4932]: E1125 09:51:49.908256 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1\": container with ID starting with 7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1 not found: ID does not exist" containerID="7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.908614 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1"} err="failed to get container status \"7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1\": rpc error: code = NotFound desc = could not find container \"7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1\": container with ID starting with 7aa0b7e9f92cfd04bf19cbf7e5a4877374855ac313f161ee7745ae584786f6f1 not found: ID does not exist" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.908673 4932 scope.go:117] "RemoveContainer" containerID="a1c82220fc4a18405e20e4d0e75cb590f3c45482fd5136ee8b02ac90d65c570f" Nov 25 09:51:49 crc kubenswrapper[4932]: E1125 09:51:49.909350 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1c82220fc4a18405e20e4d0e75cb590f3c45482fd5136ee8b02ac90d65c570f\": container with ID starting with a1c82220fc4a18405e20e4d0e75cb590f3c45482fd5136ee8b02ac90d65c570f not found: ID does not exist" containerID="a1c82220fc4a18405e20e4d0e75cb590f3c45482fd5136ee8b02ac90d65c570f" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.909391 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1c82220fc4a18405e20e4d0e75cb590f3c45482fd5136ee8b02ac90d65c570f"} err="failed to get container status \"a1c82220fc4a18405e20e4d0e75cb590f3c45482fd5136ee8b02ac90d65c570f\": rpc error: code = NotFound desc = could not find 
container \"a1c82220fc4a18405e20e4d0e75cb590f3c45482fd5136ee8b02ac90d65c570f\": container with ID starting with a1c82220fc4a18405e20e4d0e75cb590f3c45482fd5136ee8b02ac90d65c570f not found: ID does not exist" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.909423 4932 scope.go:117] "RemoveContainer" containerID="64fc3a9bd9d8a725b95255369b0ffa7946b811a10dea0340495540b04901b2e9" Nov 25 09:51:49 crc kubenswrapper[4932]: E1125 09:51:49.909698 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64fc3a9bd9d8a725b95255369b0ffa7946b811a10dea0340495540b04901b2e9\": container with ID starting with 64fc3a9bd9d8a725b95255369b0ffa7946b811a10dea0340495540b04901b2e9 not found: ID does not exist" containerID="64fc3a9bd9d8a725b95255369b0ffa7946b811a10dea0340495540b04901b2e9" Nov 25 09:51:49 crc kubenswrapper[4932]: I1125 09:51:49.909754 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64fc3a9bd9d8a725b95255369b0ffa7946b811a10dea0340495540b04901b2e9"} err="failed to get container status \"64fc3a9bd9d8a725b95255369b0ffa7946b811a10dea0340495540b04901b2e9\": rpc error: code = NotFound desc = could not find container \"64fc3a9bd9d8a725b95255369b0ffa7946b811a10dea0340495540b04901b2e9\": container with ID starting with 64fc3a9bd9d8a725b95255369b0ffa7946b811a10dea0340495540b04901b2e9 not found: ID does not exist" Nov 25 09:51:50 crc kubenswrapper[4932]: I1125 09:51:50.614823 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f76439b-c387-4bf3-896a-15574e4ed843" path="/var/lib/kubelet/pods/5f76439b-c387-4bf3-896a-15574e4ed843/volumes" Nov 25 09:52:00 crc kubenswrapper[4932]: I1125 09:52:00.613075 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:52:00 crc kubenswrapper[4932]: E1125 09:52:00.614115 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:52:11 crc kubenswrapper[4932]: I1125 09:52:11.608720 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:52:11 crc kubenswrapper[4932]: E1125 09:52:11.609945 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:52:26 crc kubenswrapper[4932]: I1125 09:52:26.606328 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:52:26 crc kubenswrapper[4932]: E1125 09:52:26.607532 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:52:40 crc kubenswrapper[4932]: I1125 09:52:40.610571 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:52:40 crc kubenswrapper[4932]: E1125 09:52:40.611910 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:52:51 crc kubenswrapper[4932]: I1125 09:52:51.606429 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:52:51 crc kubenswrapper[4932]: E1125 09:52:51.607685 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:53:03 crc kubenswrapper[4932]: I1125 09:53:03.605951 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:53:03 crc kubenswrapper[4932]: E1125 09:53:03.606724 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:53:17 crc kubenswrapper[4932]: I1125 09:53:17.605389 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:53:17 crc kubenswrapper[4932]: E1125 09:53:17.606180 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:53:32 crc kubenswrapper[4932]: I1125 09:53:32.605626 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:53:32 crc kubenswrapper[4932]: E1125 09:53:32.606492 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" 
podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:53:44 crc kubenswrapper[4932]: I1125 09:53:44.606344 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:53:44 crc kubenswrapper[4932]: E1125 09:53:44.607418 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:53:56 crc kubenswrapper[4932]: I1125 09:53:56.606293 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:53:56 crc kubenswrapper[4932]: E1125 09:53:56.607442 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:54:10 crc kubenswrapper[4932]: I1125 09:54:10.618450 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:54:10 crc kubenswrapper[4932]: E1125 09:54:10.619939 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:54:23 crc kubenswrapper[4932]: I1125 09:54:23.605841 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:54:23 crc kubenswrapper[4932]: E1125 09:54:23.606570 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:54:35 crc kubenswrapper[4932]: I1125 09:54:35.606234 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:54:35 crc kubenswrapper[4932]: E1125 09:54:35.606886 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 09:54:50 crc kubenswrapper[4932]: I1125 09:54:50.616423 4932 scope.go:117] "RemoveContainer" 
containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 09:54:51 crc kubenswrapper[4932]: I1125 09:54:51.346792 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"1e69d2d7b067068ef393101ca1c3436cce542093894f5fecc6c1435331f4ff20"} Nov 25 09:57:07 crc kubenswrapper[4932]: I1125 09:57:07.181489 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:57:07 crc kubenswrapper[4932]: I1125 09:57:07.182278 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:57:37 crc kubenswrapper[4932]: I1125 09:57:37.181003 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:57:37 crc kubenswrapper[4932]: I1125 09:57:37.181634 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:58:07 crc kubenswrapper[4932]: I1125 09:58:07.181423 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 09:58:07 crc kubenswrapper[4932]: I1125 09:58:07.182144 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 09:58:07 crc kubenswrapper[4932]: I1125 09:58:07.182285 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 09:58:07 crc kubenswrapper[4932]: I1125 09:58:07.183258 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1e69d2d7b067068ef393101ca1c3436cce542093894f5fecc6c1435331f4ff20"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 09:58:07 crc kubenswrapper[4932]: I1125 09:58:07.183355 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" 
containerName="machine-config-daemon" containerID="cri-o://1e69d2d7b067068ef393101ca1c3436cce542093894f5fecc6c1435331f4ff20" gracePeriod=600 Nov 25 09:58:08 crc kubenswrapper[4932]: I1125 09:58:08.102098 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="1e69d2d7b067068ef393101ca1c3436cce542093894f5fecc6c1435331f4ff20" exitCode=0 Nov 25 09:58:08 crc kubenswrapper[4932]: I1125 09:58:08.102170 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"1e69d2d7b067068ef393101ca1c3436cce542093894f5fecc6c1435331f4ff20"} Nov 25 09:58:08 crc kubenswrapper[4932]: I1125 09:58:08.103040 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94"} Nov 25 09:58:08 crc kubenswrapper[4932]: I1125 09:58:08.103081 4932 scope.go:117] "RemoveContainer" containerID="fca385c0eddeb9069188dbe162c044fc4ae5f29015e68247d26f7037bc6ac7fa" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.159242 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn"] Nov 25 10:00:00 crc kubenswrapper[4932]: E1125 10:00:00.161498 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f76439b-c387-4bf3-896a-15574e4ed843" containerName="extract-content" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.161635 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f76439b-c387-4bf3-896a-15574e4ed843" containerName="extract-content" Nov 25 10:00:00 crc kubenswrapper[4932]: E1125 10:00:00.161709 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f76439b-c387-4bf3-896a-15574e4ed843" containerName="extract-utilities" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.161768 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f76439b-c387-4bf3-896a-15574e4ed843" containerName="extract-utilities" Nov 25 10:00:00 crc kubenswrapper[4932]: E1125 10:00:00.161835 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f76439b-c387-4bf3-896a-15574e4ed843" containerName="registry-server" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.161894 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f76439b-c387-4bf3-896a-15574e4ed843" containerName="registry-server" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.162122 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f76439b-c387-4bf3-896a-15574e4ed843" containerName="registry-server" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.162865 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.165720 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.167860 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn"] Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.168520 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.229731 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-config-volume\") pod \"collect-profiles-29401080-ttwfn\" (UID: \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.229783 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92xf9\" (UniqueName: \"kubernetes.io/projected/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-kube-api-access-92xf9\") pod \"collect-profiles-29401080-ttwfn\" (UID: \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.229849 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-secret-volume\") pod \"collect-profiles-29401080-ttwfn\" (UID: \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.330828 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-config-volume\") pod \"collect-profiles-29401080-ttwfn\" (UID: \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.331267 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92xf9\" (UniqueName: \"kubernetes.io/projected/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-kube-api-access-92xf9\") pod \"collect-profiles-29401080-ttwfn\" (UID: \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.331331 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-secret-volume\") pod \"collect-profiles-29401080-ttwfn\" (UID: \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.332288 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-config-volume\") pod 
\"collect-profiles-29401080-ttwfn\" (UID: \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.336738 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-secret-volume\") pod \"collect-profiles-29401080-ttwfn\" (UID: \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.353465 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92xf9\" (UniqueName: \"kubernetes.io/projected/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-kube-api-access-92xf9\") pod \"collect-profiles-29401080-ttwfn\" (UID: \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.491218 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:00 crc kubenswrapper[4932]: I1125 10:00:00.994403 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn"] Nov 25 10:00:01 crc kubenswrapper[4932]: I1125 10:00:01.115059 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" event={"ID":"1cf2e72d-629a-4db8-98e3-685af3ddd0e4","Type":"ContainerStarted","Data":"fd8fcf98e972b2d8747e54bff530bf840eeb74d63d6ade13b8248a776f319bf9"} Nov 25 10:00:02 crc kubenswrapper[4932]: I1125 10:00:02.123598 4932 generic.go:334] "Generic (PLEG): container finished" podID="1cf2e72d-629a-4db8-98e3-685af3ddd0e4" containerID="a7f7010ad95e2d17bef0ecc28ff99e10eb89004f25f84bb5361e70a0715c9e09" exitCode=0 Nov 25 10:00:02 crc kubenswrapper[4932]: I1125 10:00:02.124143 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" event={"ID":"1cf2e72d-629a-4db8-98e3-685af3ddd0e4","Type":"ContainerDied","Data":"a7f7010ad95e2d17bef0ecc28ff99e10eb89004f25f84bb5361e70a0715c9e09"} Nov 25 10:00:03 crc kubenswrapper[4932]: I1125 10:00:03.398435 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:03 crc kubenswrapper[4932]: I1125 10:00:03.580448 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-config-volume\") pod \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\" (UID: \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\") " Nov 25 10:00:03 crc kubenswrapper[4932]: I1125 10:00:03.580550 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92xf9\" (UniqueName: \"kubernetes.io/projected/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-kube-api-access-92xf9\") pod \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\" (UID: \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\") " Nov 25 10:00:03 crc kubenswrapper[4932]: I1125 10:00:03.580771 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-secret-volume\") pod \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\" (UID: \"1cf2e72d-629a-4db8-98e3-685af3ddd0e4\") " Nov 25 10:00:03 crc kubenswrapper[4932]: I1125 10:00:03.581986 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-config-volume" (OuterVolumeSpecName: "config-volume") pod "1cf2e72d-629a-4db8-98e3-685af3ddd0e4" (UID: "1cf2e72d-629a-4db8-98e3-685af3ddd0e4"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:00:03 crc kubenswrapper[4932]: I1125 10:00:03.593499 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-kube-api-access-92xf9" (OuterVolumeSpecName: "kube-api-access-92xf9") pod "1cf2e72d-629a-4db8-98e3-685af3ddd0e4" (UID: "1cf2e72d-629a-4db8-98e3-685af3ddd0e4"). InnerVolumeSpecName "kube-api-access-92xf9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:03 crc kubenswrapper[4932]: I1125 10:00:03.601559 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1cf2e72d-629a-4db8-98e3-685af3ddd0e4" (UID: "1cf2e72d-629a-4db8-98e3-685af3ddd0e4"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:00:03 crc kubenswrapper[4932]: I1125 10:00:03.682859 4932 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:03 crc kubenswrapper[4932]: I1125 10:00:03.682931 4932 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:03 crc kubenswrapper[4932]: I1125 10:00:03.682967 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92xf9\" (UniqueName: \"kubernetes.io/projected/1cf2e72d-629a-4db8-98e3-685af3ddd0e4-kube-api-access-92xf9\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.141733 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" event={"ID":"1cf2e72d-629a-4db8-98e3-685af3ddd0e4","Type":"ContainerDied","Data":"fd8fcf98e972b2d8747e54bff530bf840eeb74d63d6ade13b8248a776f319bf9"} Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.142102 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd8fcf98e972b2d8747e54bff530bf840eeb74d63d6ade13b8248a776f319bf9" Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.141797 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn" Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.468538 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8"] Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.473454 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401035-pm6g8"] Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.621074 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aab3e506-147f-41f1-899e-013c5126dfea" path="/var/lib/kubelet/pods/aab3e506-147f-41f1-899e-013c5126dfea/volumes" Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.863494 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-m68wh"] Nov 25 10:00:04 crc kubenswrapper[4932]: E1125 10:00:04.863899 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cf2e72d-629a-4db8-98e3-685af3ddd0e4" containerName="collect-profiles" Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.863913 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cf2e72d-629a-4db8-98e3-685af3ddd0e4" containerName="collect-profiles" Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.864147 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cf2e72d-629a-4db8-98e3-685af3ddd0e4" containerName="collect-profiles" Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.866218 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.875951 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m68wh"] Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.903103 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04e24d6-45af-438b-b32f-05f0e0b94725-utilities\") pod \"community-operators-m68wh\" (UID: \"a04e24d6-45af-438b-b32f-05f0e0b94725\") " pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.903180 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04e24d6-45af-438b-b32f-05f0e0b94725-catalog-content\") pod \"community-operators-m68wh\" (UID: \"a04e24d6-45af-438b-b32f-05f0e0b94725\") " pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:04 crc kubenswrapper[4932]: I1125 10:00:04.903292 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nj5c4\" (UniqueName: \"kubernetes.io/projected/a04e24d6-45af-438b-b32f-05f0e0b94725-kube-api-access-nj5c4\") pod \"community-operators-m68wh\" (UID: \"a04e24d6-45af-438b-b32f-05f0e0b94725\") " pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:05 crc kubenswrapper[4932]: I1125 10:00:05.004276 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nj5c4\" (UniqueName: \"kubernetes.io/projected/a04e24d6-45af-438b-b32f-05f0e0b94725-kube-api-access-nj5c4\") pod \"community-operators-m68wh\" (UID: \"a04e24d6-45af-438b-b32f-05f0e0b94725\") " pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:05 crc kubenswrapper[4932]: I1125 10:00:05.004358 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04e24d6-45af-438b-b32f-05f0e0b94725-utilities\") pod \"community-operators-m68wh\" (UID: \"a04e24d6-45af-438b-b32f-05f0e0b94725\") " pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:05 crc kubenswrapper[4932]: I1125 10:00:05.004393 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04e24d6-45af-438b-b32f-05f0e0b94725-catalog-content\") pod \"community-operators-m68wh\" (UID: \"a04e24d6-45af-438b-b32f-05f0e0b94725\") " pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:05 crc kubenswrapper[4932]: I1125 10:00:05.005294 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04e24d6-45af-438b-b32f-05f0e0b94725-catalog-content\") pod \"community-operators-m68wh\" (UID: \"a04e24d6-45af-438b-b32f-05f0e0b94725\") " pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:05 crc kubenswrapper[4932]: I1125 10:00:05.005556 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04e24d6-45af-438b-b32f-05f0e0b94725-utilities\") pod \"community-operators-m68wh\" (UID: \"a04e24d6-45af-438b-b32f-05f0e0b94725\") " pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:05 crc kubenswrapper[4932]: I1125 10:00:05.027609 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-nj5c4\" (UniqueName: \"kubernetes.io/projected/a04e24d6-45af-438b-b32f-05f0e0b94725-kube-api-access-nj5c4\") pod \"community-operators-m68wh\" (UID: \"a04e24d6-45af-438b-b32f-05f0e0b94725\") " pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:05 crc kubenswrapper[4932]: I1125 10:00:05.195771 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:05 crc kubenswrapper[4932]: I1125 10:00:05.513233 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-m68wh"] Nov 25 10:00:05 crc kubenswrapper[4932]: W1125 10:00:05.525373 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda04e24d6_45af_438b_b32f_05f0e0b94725.slice/crio-d84d64441e2b2b31e569d16748ad3c7e06a46fd1a3bc62e0fb190835f95251d5 WatchSource:0}: Error finding container d84d64441e2b2b31e569d16748ad3c7e06a46fd1a3bc62e0fb190835f95251d5: Status 404 returned error can't find the container with id d84d64441e2b2b31e569d16748ad3c7e06a46fd1a3bc62e0fb190835f95251d5 Nov 25 10:00:06 crc kubenswrapper[4932]: I1125 10:00:06.170037 4932 generic.go:334] "Generic (PLEG): container finished" podID="a04e24d6-45af-438b-b32f-05f0e0b94725" containerID="04159635dd263e0f0b0f15af7af7d2429c7e6c85ff0a2d17d57b15611128ca18" exitCode=0 Nov 25 10:00:06 crc kubenswrapper[4932]: I1125 10:00:06.170130 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m68wh" event={"ID":"a04e24d6-45af-438b-b32f-05f0e0b94725","Type":"ContainerDied","Data":"04159635dd263e0f0b0f15af7af7d2429c7e6c85ff0a2d17d57b15611128ca18"} Nov 25 10:00:06 crc kubenswrapper[4932]: I1125 10:00:06.170376 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m68wh" event={"ID":"a04e24d6-45af-438b-b32f-05f0e0b94725","Type":"ContainerStarted","Data":"d84d64441e2b2b31e569d16748ad3c7e06a46fd1a3bc62e0fb190835f95251d5"} Nov 25 10:00:06 crc kubenswrapper[4932]: I1125 10:00:06.172029 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:00:07 crc kubenswrapper[4932]: I1125 10:00:07.178884 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m68wh" event={"ID":"a04e24d6-45af-438b-b32f-05f0e0b94725","Type":"ContainerStarted","Data":"8f4b186a11dfa1d927540e776276813872038b207db841737b5a605a492d0e2d"} Nov 25 10:00:07 crc kubenswrapper[4932]: I1125 10:00:07.180910 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:00:07 crc kubenswrapper[4932]: I1125 10:00:07.181018 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:00:08 crc kubenswrapper[4932]: I1125 10:00:08.191560 4932 generic.go:334] "Generic (PLEG): container finished" podID="a04e24d6-45af-438b-b32f-05f0e0b94725" 
containerID="8f4b186a11dfa1d927540e776276813872038b207db841737b5a605a492d0e2d" exitCode=0 Nov 25 10:00:08 crc kubenswrapper[4932]: I1125 10:00:08.191678 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m68wh" event={"ID":"a04e24d6-45af-438b-b32f-05f0e0b94725","Type":"ContainerDied","Data":"8f4b186a11dfa1d927540e776276813872038b207db841737b5a605a492d0e2d"} Nov 25 10:00:08 crc kubenswrapper[4932]: I1125 10:00:08.360838 4932 scope.go:117] "RemoveContainer" containerID="9d4cc229698d0944ed0ff1940bf0e8cd47f89a4a582470666c5c6b89f1fc3ffa" Nov 25 10:00:09 crc kubenswrapper[4932]: I1125 10:00:09.203904 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m68wh" event={"ID":"a04e24d6-45af-438b-b32f-05f0e0b94725","Type":"ContainerStarted","Data":"08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f"} Nov 25 10:00:09 crc kubenswrapper[4932]: I1125 10:00:09.222074 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-m68wh" podStartSLOduration=2.833030736 podStartE2EDuration="5.222049858s" podCreationTimestamp="2025-11-25 10:00:04 +0000 UTC" firstStartedPulling="2025-11-25 10:00:06.171804185 +0000 UTC m=+4266.297833748" lastFinishedPulling="2025-11-25 10:00:08.560823267 +0000 UTC m=+4268.686852870" observedRunningTime="2025-11-25 10:00:09.220974607 +0000 UTC m=+4269.347004170" watchObservedRunningTime="2025-11-25 10:00:09.222049858 +0000 UTC m=+4269.348079421" Nov 25 10:00:12 crc kubenswrapper[4932]: I1125 10:00:12.827567 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qtzc4"] Nov 25 10:00:12 crc kubenswrapper[4932]: I1125 10:00:12.830261 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:12 crc kubenswrapper[4932]: I1125 10:00:12.855256 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-catalog-content\") pod \"redhat-marketplace-qtzc4\" (UID: \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\") " pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:12 crc kubenswrapper[4932]: I1125 10:00:12.855987 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4k9q\" (UniqueName: \"kubernetes.io/projected/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-kube-api-access-g4k9q\") pod \"redhat-marketplace-qtzc4\" (UID: \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\") " pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:12 crc kubenswrapper[4932]: I1125 10:00:12.856268 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-utilities\") pod \"redhat-marketplace-qtzc4\" (UID: \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\") " pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:12 crc kubenswrapper[4932]: I1125 10:00:12.874006 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qtzc4"] Nov 25 10:00:12 crc kubenswrapper[4932]: I1125 10:00:12.957952 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4k9q\" (UniqueName: \"kubernetes.io/projected/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-kube-api-access-g4k9q\") pod \"redhat-marketplace-qtzc4\" (UID: \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\") " pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:12 crc kubenswrapper[4932]: I1125 10:00:12.958351 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-utilities\") pod \"redhat-marketplace-qtzc4\" (UID: \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\") " pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:12 crc kubenswrapper[4932]: I1125 10:00:12.958517 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-catalog-content\") pod \"redhat-marketplace-qtzc4\" (UID: \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\") " pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:12 crc kubenswrapper[4932]: I1125 10:00:12.958992 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-utilities\") pod \"redhat-marketplace-qtzc4\" (UID: \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\") " pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:12 crc kubenswrapper[4932]: I1125 10:00:12.959144 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-catalog-content\") pod \"redhat-marketplace-qtzc4\" (UID: \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\") " pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:13 crc kubenswrapper[4932]: I1125 10:00:13.063146 4932 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-g4k9q\" (UniqueName: \"kubernetes.io/projected/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-kube-api-access-g4k9q\") pod \"redhat-marketplace-qtzc4\" (UID: \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\") " pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:13 crc kubenswrapper[4932]: I1125 10:00:13.170248 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:13 crc kubenswrapper[4932]: I1125 10:00:13.616202 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qtzc4"] Nov 25 10:00:13 crc kubenswrapper[4932]: W1125 10:00:13.625074 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26bb7515_cbe4_4f78_b161_c6a6381c5d4e.slice/crio-a02d4c87af99ef2dd835116091cfcde06c5d1c913be5fca0f0a0b153d5c3eac9 WatchSource:0}: Error finding container a02d4c87af99ef2dd835116091cfcde06c5d1c913be5fca0f0a0b153d5c3eac9: Status 404 returned error can't find the container with id a02d4c87af99ef2dd835116091cfcde06c5d1c913be5fca0f0a0b153d5c3eac9 Nov 25 10:00:14 crc kubenswrapper[4932]: I1125 10:00:14.250913 4932 generic.go:334] "Generic (PLEG): container finished" podID="26bb7515-cbe4-4f78-b161-c6a6381c5d4e" containerID="4df5fa1be39ec85fa140b08c717bc9898d249b1741422a73c5ba8400b8894202" exitCode=0 Nov 25 10:00:14 crc kubenswrapper[4932]: I1125 10:00:14.251007 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qtzc4" event={"ID":"26bb7515-cbe4-4f78-b161-c6a6381c5d4e","Type":"ContainerDied","Data":"4df5fa1be39ec85fa140b08c717bc9898d249b1741422a73c5ba8400b8894202"} Nov 25 10:00:14 crc kubenswrapper[4932]: I1125 10:00:14.251211 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qtzc4" event={"ID":"26bb7515-cbe4-4f78-b161-c6a6381c5d4e","Type":"ContainerStarted","Data":"a02d4c87af99ef2dd835116091cfcde06c5d1c913be5fca0f0a0b153d5c3eac9"} Nov 25 10:00:15 crc kubenswrapper[4932]: I1125 10:00:15.196602 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:15 crc kubenswrapper[4932]: I1125 10:00:15.197424 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:15 crc kubenswrapper[4932]: I1125 10:00:15.263842 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:16 crc kubenswrapper[4932]: I1125 10:00:16.272091 4932 generic.go:334] "Generic (PLEG): container finished" podID="26bb7515-cbe4-4f78-b161-c6a6381c5d4e" containerID="cf2dd9a72051217b2cf6717c970e5efeb25b4b275c1f46ba55dd50362fec5f94" exitCode=0 Nov 25 10:00:16 crc kubenswrapper[4932]: I1125 10:00:16.272173 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qtzc4" event={"ID":"26bb7515-cbe4-4f78-b161-c6a6381c5d4e","Type":"ContainerDied","Data":"cf2dd9a72051217b2cf6717c970e5efeb25b4b275c1f46ba55dd50362fec5f94"} Nov 25 10:00:16 crc kubenswrapper[4932]: I1125 10:00:16.321352 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:17 crc kubenswrapper[4932]: I1125 10:00:17.283846 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-qtzc4" event={"ID":"26bb7515-cbe4-4f78-b161-c6a6381c5d4e","Type":"ContainerStarted","Data":"4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986"} Nov 25 10:00:17 crc kubenswrapper[4932]: I1125 10:00:17.311634 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qtzc4" podStartSLOduration=2.864746067 podStartE2EDuration="5.311607089s" podCreationTimestamp="2025-11-25 10:00:12 +0000 UTC" firstStartedPulling="2025-11-25 10:00:14.252495331 +0000 UTC m=+4274.378524904" lastFinishedPulling="2025-11-25 10:00:16.699356323 +0000 UTC m=+4276.825385926" observedRunningTime="2025-11-25 10:00:17.301749836 +0000 UTC m=+4277.427779429" watchObservedRunningTime="2025-11-25 10:00:17.311607089 +0000 UTC m=+4277.437636672" Nov 25 10:00:17 crc kubenswrapper[4932]: I1125 10:00:17.640480 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m68wh"] Nov 25 10:00:18 crc kubenswrapper[4932]: I1125 10:00:18.290664 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-m68wh" podUID="a04e24d6-45af-438b-b32f-05f0e0b94725" containerName="registry-server" containerID="cri-o://08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f" gracePeriod=2 Nov 25 10:00:18 crc kubenswrapper[4932]: I1125 10:00:18.702936 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:18 crc kubenswrapper[4932]: I1125 10:00:18.853247 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04e24d6-45af-438b-b32f-05f0e0b94725-utilities\") pod \"a04e24d6-45af-438b-b32f-05f0e0b94725\" (UID: \"a04e24d6-45af-438b-b32f-05f0e0b94725\") " Nov 25 10:00:18 crc kubenswrapper[4932]: I1125 10:00:18.853362 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04e24d6-45af-438b-b32f-05f0e0b94725-catalog-content\") pod \"a04e24d6-45af-438b-b32f-05f0e0b94725\" (UID: \"a04e24d6-45af-438b-b32f-05f0e0b94725\") " Nov 25 10:00:18 crc kubenswrapper[4932]: I1125 10:00:18.853431 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nj5c4\" (UniqueName: \"kubernetes.io/projected/a04e24d6-45af-438b-b32f-05f0e0b94725-kube-api-access-nj5c4\") pod \"a04e24d6-45af-438b-b32f-05f0e0b94725\" (UID: \"a04e24d6-45af-438b-b32f-05f0e0b94725\") " Nov 25 10:00:18 crc kubenswrapper[4932]: I1125 10:00:18.854686 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a04e24d6-45af-438b-b32f-05f0e0b94725-utilities" (OuterVolumeSpecName: "utilities") pod "a04e24d6-45af-438b-b32f-05f0e0b94725" (UID: "a04e24d6-45af-438b-b32f-05f0e0b94725"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:18 crc kubenswrapper[4932]: I1125 10:00:18.862949 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a04e24d6-45af-438b-b32f-05f0e0b94725-kube-api-access-nj5c4" (OuterVolumeSpecName: "kube-api-access-nj5c4") pod "a04e24d6-45af-438b-b32f-05f0e0b94725" (UID: "a04e24d6-45af-438b-b32f-05f0e0b94725"). InnerVolumeSpecName "kube-api-access-nj5c4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:18 crc kubenswrapper[4932]: I1125 10:00:18.918133 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a04e24d6-45af-438b-b32f-05f0e0b94725-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a04e24d6-45af-438b-b32f-05f0e0b94725" (UID: "a04e24d6-45af-438b-b32f-05f0e0b94725"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:18 crc kubenswrapper[4932]: I1125 10:00:18.955388 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a04e24d6-45af-438b-b32f-05f0e0b94725-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:18 crc kubenswrapper[4932]: I1125 10:00:18.955429 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a04e24d6-45af-438b-b32f-05f0e0b94725-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:18 crc kubenswrapper[4932]: I1125 10:00:18.955443 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nj5c4\" (UniqueName: \"kubernetes.io/projected/a04e24d6-45af-438b-b32f-05f0e0b94725-kube-api-access-nj5c4\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.301528 4932 generic.go:334] "Generic (PLEG): container finished" podID="a04e24d6-45af-438b-b32f-05f0e0b94725" containerID="08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f" exitCode=0 Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.301576 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m68wh" event={"ID":"a04e24d6-45af-438b-b32f-05f0e0b94725","Type":"ContainerDied","Data":"08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f"} Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.301605 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-m68wh" event={"ID":"a04e24d6-45af-438b-b32f-05f0e0b94725","Type":"ContainerDied","Data":"d84d64441e2b2b31e569d16748ad3c7e06a46fd1a3bc62e0fb190835f95251d5"} Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.301628 4932 scope.go:117] "RemoveContainer" containerID="08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.301655 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-m68wh" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.326055 4932 scope.go:117] "RemoveContainer" containerID="8f4b186a11dfa1d927540e776276813872038b207db841737b5a605a492d0e2d" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.343234 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-m68wh"] Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.348543 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-m68wh"] Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.359560 4932 scope.go:117] "RemoveContainer" containerID="04159635dd263e0f0b0f15af7af7d2429c7e6c85ff0a2d17d57b15611128ca18" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.378612 4932 scope.go:117] "RemoveContainer" containerID="08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f" Nov 25 10:00:19 crc kubenswrapper[4932]: E1125 10:00:19.379067 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f\": container with ID starting with 08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f not found: ID does not exist" containerID="08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.379122 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f"} err="failed to get container status \"08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f\": rpc error: code = NotFound desc = could not find container \"08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f\": container with ID starting with 08fb0864316df1eda54398388659ed2c7e1c37ba74f26c86ca9e040c3881a53f not found: ID does not exist" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.379153 4932 scope.go:117] "RemoveContainer" containerID="8f4b186a11dfa1d927540e776276813872038b207db841737b5a605a492d0e2d" Nov 25 10:00:19 crc kubenswrapper[4932]: E1125 10:00:19.379511 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f4b186a11dfa1d927540e776276813872038b207db841737b5a605a492d0e2d\": container with ID starting with 8f4b186a11dfa1d927540e776276813872038b207db841737b5a605a492d0e2d not found: ID does not exist" containerID="8f4b186a11dfa1d927540e776276813872038b207db841737b5a605a492d0e2d" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.379531 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f4b186a11dfa1d927540e776276813872038b207db841737b5a605a492d0e2d"} err="failed to get container status \"8f4b186a11dfa1d927540e776276813872038b207db841737b5a605a492d0e2d\": rpc error: code = NotFound desc = could not find container \"8f4b186a11dfa1d927540e776276813872038b207db841737b5a605a492d0e2d\": container with ID starting with 8f4b186a11dfa1d927540e776276813872038b207db841737b5a605a492d0e2d not found: ID does not exist" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.379543 4932 scope.go:117] "RemoveContainer" containerID="04159635dd263e0f0b0f15af7af7d2429c7e6c85ff0a2d17d57b15611128ca18" Nov 25 10:00:19 crc kubenswrapper[4932]: E1125 10:00:19.379818 4932 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"04159635dd263e0f0b0f15af7af7d2429c7e6c85ff0a2d17d57b15611128ca18\": container with ID starting with 04159635dd263e0f0b0f15af7af7d2429c7e6c85ff0a2d17d57b15611128ca18 not found: ID does not exist" containerID="04159635dd263e0f0b0f15af7af7d2429c7e6c85ff0a2d17d57b15611128ca18" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.379847 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04159635dd263e0f0b0f15af7af7d2429c7e6c85ff0a2d17d57b15611128ca18"} err="failed to get container status \"04159635dd263e0f0b0f15af7af7d2429c7e6c85ff0a2d17d57b15611128ca18\": rpc error: code = NotFound desc = could not find container \"04159635dd263e0f0b0f15af7af7d2429c7e6c85ff0a2d17d57b15611128ca18\": container with ID starting with 04159635dd263e0f0b0f15af7af7d2429c7e6c85ff0a2d17d57b15611128ca18 not found: ID does not exist" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.451826 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5pnq4"] Nov 25 10:00:19 crc kubenswrapper[4932]: E1125 10:00:19.452332 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a04e24d6-45af-438b-b32f-05f0e0b94725" containerName="registry-server" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.452351 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a04e24d6-45af-438b-b32f-05f0e0b94725" containerName="registry-server" Nov 25 10:00:19 crc kubenswrapper[4932]: E1125 10:00:19.452378 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a04e24d6-45af-438b-b32f-05f0e0b94725" containerName="extract-content" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.452387 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a04e24d6-45af-438b-b32f-05f0e0b94725" containerName="extract-content" Nov 25 10:00:19 crc kubenswrapper[4932]: E1125 10:00:19.452405 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a04e24d6-45af-438b-b32f-05f0e0b94725" containerName="extract-utilities" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.452413 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a04e24d6-45af-438b-b32f-05f0e0b94725" containerName="extract-utilities" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.452619 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a04e24d6-45af-438b-b32f-05f0e0b94725" containerName="registry-server" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.453993 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.463970 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrmnf\" (UniqueName: \"kubernetes.io/projected/29574742-75c9-4047-9489-1fb591f673d1-kube-api-access-rrmnf\") pod \"redhat-operators-5pnq4\" (UID: \"29574742-75c9-4047-9489-1fb591f673d1\") " pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.464043 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29574742-75c9-4047-9489-1fb591f673d1-utilities\") pod \"redhat-operators-5pnq4\" (UID: \"29574742-75c9-4047-9489-1fb591f673d1\") " pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.464351 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29574742-75c9-4047-9489-1fb591f673d1-catalog-content\") pod \"redhat-operators-5pnq4\" (UID: \"29574742-75c9-4047-9489-1fb591f673d1\") " pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.467480 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5pnq4"] Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.565957 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29574742-75c9-4047-9489-1fb591f673d1-catalog-content\") pod \"redhat-operators-5pnq4\" (UID: \"29574742-75c9-4047-9489-1fb591f673d1\") " pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.566393 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrmnf\" (UniqueName: \"kubernetes.io/projected/29574742-75c9-4047-9489-1fb591f673d1-kube-api-access-rrmnf\") pod \"redhat-operators-5pnq4\" (UID: \"29574742-75c9-4047-9489-1fb591f673d1\") " pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.566702 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29574742-75c9-4047-9489-1fb591f673d1-utilities\") pod \"redhat-operators-5pnq4\" (UID: \"29574742-75c9-4047-9489-1fb591f673d1\") " pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.567079 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29574742-75c9-4047-9489-1fb591f673d1-catalog-content\") pod \"redhat-operators-5pnq4\" (UID: \"29574742-75c9-4047-9489-1fb591f673d1\") " pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.567156 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29574742-75c9-4047-9489-1fb591f673d1-utilities\") pod \"redhat-operators-5pnq4\" (UID: \"29574742-75c9-4047-9489-1fb591f673d1\") " pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.585829 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-rrmnf\" (UniqueName: \"kubernetes.io/projected/29574742-75c9-4047-9489-1fb591f673d1-kube-api-access-rrmnf\") pod \"redhat-operators-5pnq4\" (UID: \"29574742-75c9-4047-9489-1fb591f673d1\") " pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:19 crc kubenswrapper[4932]: I1125 10:00:19.781549 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:20 crc kubenswrapper[4932]: I1125 10:00:20.231776 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5pnq4"] Nov 25 10:00:20 crc kubenswrapper[4932]: W1125 10:00:20.236091 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod29574742_75c9_4047_9489_1fb591f673d1.slice/crio-c83fb9a5d6bc765fe64bea271252f697d08c64fd30b13a6ea93deb289e97fe72 WatchSource:0}: Error finding container c83fb9a5d6bc765fe64bea271252f697d08c64fd30b13a6ea93deb289e97fe72: Status 404 returned error can't find the container with id c83fb9a5d6bc765fe64bea271252f697d08c64fd30b13a6ea93deb289e97fe72 Nov 25 10:00:20 crc kubenswrapper[4932]: I1125 10:00:20.314161 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5pnq4" event={"ID":"29574742-75c9-4047-9489-1fb591f673d1","Type":"ContainerStarted","Data":"c83fb9a5d6bc765fe64bea271252f697d08c64fd30b13a6ea93deb289e97fe72"} Nov 25 10:00:20 crc kubenswrapper[4932]: I1125 10:00:20.617222 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a04e24d6-45af-438b-b32f-05f0e0b94725" path="/var/lib/kubelet/pods/a04e24d6-45af-438b-b32f-05f0e0b94725/volumes" Nov 25 10:00:21 crc kubenswrapper[4932]: I1125 10:00:21.325930 4932 generic.go:334] "Generic (PLEG): container finished" podID="29574742-75c9-4047-9489-1fb591f673d1" containerID="4f5291fee6db424bd6f87d1cb600b092cd268527931583ea7124763569114e38" exitCode=0 Nov 25 10:00:21 crc kubenswrapper[4932]: I1125 10:00:21.326096 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5pnq4" event={"ID":"29574742-75c9-4047-9489-1fb591f673d1","Type":"ContainerDied","Data":"4f5291fee6db424bd6f87d1cb600b092cd268527931583ea7124763569114e38"} Nov 25 10:00:22 crc kubenswrapper[4932]: I1125 10:00:22.335441 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5pnq4" event={"ID":"29574742-75c9-4047-9489-1fb591f673d1","Type":"ContainerStarted","Data":"561f62647233a1d677b2d0185406932d3a7c97bca753050084901923578ea285"} Nov 25 10:00:23 crc kubenswrapper[4932]: I1125 10:00:23.171150 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:23 crc kubenswrapper[4932]: I1125 10:00:23.171554 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:23 crc kubenswrapper[4932]: I1125 10:00:23.219511 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:23 crc kubenswrapper[4932]: I1125 10:00:23.347436 4932 generic.go:334] "Generic (PLEG): container finished" podID="29574742-75c9-4047-9489-1fb591f673d1" containerID="561f62647233a1d677b2d0185406932d3a7c97bca753050084901923578ea285" exitCode=0 Nov 25 10:00:23 crc kubenswrapper[4932]: I1125 10:00:23.348228 4932 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/redhat-operators-5pnq4" event={"ID":"29574742-75c9-4047-9489-1fb591f673d1","Type":"ContainerDied","Data":"561f62647233a1d677b2d0185406932d3a7c97bca753050084901923578ea285"} Nov 25 10:00:23 crc kubenswrapper[4932]: I1125 10:00:23.391937 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:24 crc kubenswrapper[4932]: I1125 10:00:24.358629 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5pnq4" event={"ID":"29574742-75c9-4047-9489-1fb591f673d1","Type":"ContainerStarted","Data":"1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea"} Nov 25 10:00:24 crc kubenswrapper[4932]: I1125 10:00:24.381846 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5pnq4" podStartSLOduration=2.964429235 podStartE2EDuration="5.381825801s" podCreationTimestamp="2025-11-25 10:00:19 +0000 UTC" firstStartedPulling="2025-11-25 10:00:21.33095144 +0000 UTC m=+4281.456981043" lastFinishedPulling="2025-11-25 10:00:23.748348046 +0000 UTC m=+4283.874377609" observedRunningTime="2025-11-25 10:00:24.380962956 +0000 UTC m=+4284.506992549" watchObservedRunningTime="2025-11-25 10:00:24.381825801 +0000 UTC m=+4284.507855364" Nov 25 10:00:25 crc kubenswrapper[4932]: I1125 10:00:25.641995 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qtzc4"] Nov 25 10:00:25 crc kubenswrapper[4932]: I1125 10:00:25.642417 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qtzc4" podUID="26bb7515-cbe4-4f78-b161-c6a6381c5d4e" containerName="registry-server" containerID="cri-o://4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986" gracePeriod=2 Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.061047 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.166509 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-catalog-content\") pod \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\" (UID: \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\") " Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.166579 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-utilities\") pod \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\" (UID: \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\") " Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.166786 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4k9q\" (UniqueName: \"kubernetes.io/projected/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-kube-api-access-g4k9q\") pod \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\" (UID: \"26bb7515-cbe4-4f78-b161-c6a6381c5d4e\") " Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.168286 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-utilities" (OuterVolumeSpecName: "utilities") pod "26bb7515-cbe4-4f78-b161-c6a6381c5d4e" (UID: "26bb7515-cbe4-4f78-b161-c6a6381c5d4e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.186415 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "26bb7515-cbe4-4f78-b161-c6a6381c5d4e" (UID: "26bb7515-cbe4-4f78-b161-c6a6381c5d4e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.269044 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.269727 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.377466 4932 generic.go:334] "Generic (PLEG): container finished" podID="26bb7515-cbe4-4f78-b161-c6a6381c5d4e" containerID="4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986" exitCode=0 Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.377520 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qtzc4" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.377536 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qtzc4" event={"ID":"26bb7515-cbe4-4f78-b161-c6a6381c5d4e","Type":"ContainerDied","Data":"4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986"} Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.377595 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qtzc4" event={"ID":"26bb7515-cbe4-4f78-b161-c6a6381c5d4e","Type":"ContainerDied","Data":"a02d4c87af99ef2dd835116091cfcde06c5d1c913be5fca0f0a0b153d5c3eac9"} Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.377619 4932 scope.go:117] "RemoveContainer" containerID="4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.397469 4932 scope.go:117] "RemoveContainer" containerID="cf2dd9a72051217b2cf6717c970e5efeb25b4b275c1f46ba55dd50362fec5f94" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.561813 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-kube-api-access-g4k9q" (OuterVolumeSpecName: "kube-api-access-g4k9q") pod "26bb7515-cbe4-4f78-b161-c6a6381c5d4e" (UID: "26bb7515-cbe4-4f78-b161-c6a6381c5d4e"). InnerVolumeSpecName "kube-api-access-g4k9q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.572395 4932 scope.go:117] "RemoveContainer" containerID="4df5fa1be39ec85fa140b08c717bc9898d249b1741422a73c5ba8400b8894202" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.573567 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4k9q\" (UniqueName: \"kubernetes.io/projected/26bb7515-cbe4-4f78-b161-c6a6381c5d4e-kube-api-access-g4k9q\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.595497 4932 scope.go:117] "RemoveContainer" containerID="4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986" Nov 25 10:00:26 crc kubenswrapper[4932]: E1125 10:00:26.595920 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986\": container with ID starting with 4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986 not found: ID does not exist" containerID="4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.595969 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986"} err="failed to get container status \"4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986\": rpc error: code = NotFound desc = could not find container \"4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986\": container with ID starting with 4cd224907a796e6b0f8d2b3ad0728ecc8cb0a2759417feca24ad76360b473986 not found: ID does not exist" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.596003 4932 scope.go:117] "RemoveContainer" containerID="cf2dd9a72051217b2cf6717c970e5efeb25b4b275c1f46ba55dd50362fec5f94" Nov 25 10:00:26 crc kubenswrapper[4932]: E1125 10:00:26.596362 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf2dd9a72051217b2cf6717c970e5efeb25b4b275c1f46ba55dd50362fec5f94\": container with ID starting with cf2dd9a72051217b2cf6717c970e5efeb25b4b275c1f46ba55dd50362fec5f94 not found: ID does not exist" containerID="cf2dd9a72051217b2cf6717c970e5efeb25b4b275c1f46ba55dd50362fec5f94" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.596473 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf2dd9a72051217b2cf6717c970e5efeb25b4b275c1f46ba55dd50362fec5f94"} err="failed to get container status \"cf2dd9a72051217b2cf6717c970e5efeb25b4b275c1f46ba55dd50362fec5f94\": rpc error: code = NotFound desc = could not find container \"cf2dd9a72051217b2cf6717c970e5efeb25b4b275c1f46ba55dd50362fec5f94\": container with ID starting with cf2dd9a72051217b2cf6717c970e5efeb25b4b275c1f46ba55dd50362fec5f94 not found: ID does not exist" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.596571 4932 scope.go:117] "RemoveContainer" containerID="4df5fa1be39ec85fa140b08c717bc9898d249b1741422a73c5ba8400b8894202" Nov 25 10:00:26 crc kubenswrapper[4932]: E1125 10:00:26.596916 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4df5fa1be39ec85fa140b08c717bc9898d249b1741422a73c5ba8400b8894202\": container with ID starting with 4df5fa1be39ec85fa140b08c717bc9898d249b1741422a73c5ba8400b8894202 not found: ID does not 
exist" containerID="4df5fa1be39ec85fa140b08c717bc9898d249b1741422a73c5ba8400b8894202" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.596960 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4df5fa1be39ec85fa140b08c717bc9898d249b1741422a73c5ba8400b8894202"} err="failed to get container status \"4df5fa1be39ec85fa140b08c717bc9898d249b1741422a73c5ba8400b8894202\": rpc error: code = NotFound desc = could not find container \"4df5fa1be39ec85fa140b08c717bc9898d249b1741422a73c5ba8400b8894202\": container with ID starting with 4df5fa1be39ec85fa140b08c717bc9898d249b1741422a73c5ba8400b8894202 not found: ID does not exist" Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.697069 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qtzc4"] Nov 25 10:00:26 crc kubenswrapper[4932]: I1125 10:00:26.701396 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qtzc4"] Nov 25 10:00:28 crc kubenswrapper[4932]: I1125 10:00:28.617386 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26bb7515-cbe4-4f78-b161-c6a6381c5d4e" path="/var/lib/kubelet/pods/26bb7515-cbe4-4f78-b161-c6a6381c5d4e/volumes" Nov 25 10:00:29 crc kubenswrapper[4932]: I1125 10:00:29.781762 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:29 crc kubenswrapper[4932]: I1125 10:00:29.783355 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:29 crc kubenswrapper[4932]: I1125 10:00:29.822539 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:30 crc kubenswrapper[4932]: I1125 10:00:30.455093 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:31 crc kubenswrapper[4932]: I1125 10:00:31.042813 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5pnq4"] Nov 25 10:00:32 crc kubenswrapper[4932]: I1125 10:00:32.419846 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5pnq4" podUID="29574742-75c9-4047-9489-1fb591f673d1" containerName="registry-server" containerID="cri-o://1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea" gracePeriod=2 Nov 25 10:00:32 crc kubenswrapper[4932]: I1125 10:00:32.838471 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:32 crc kubenswrapper[4932]: I1125 10:00:32.976129 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29574742-75c9-4047-9489-1fb591f673d1-utilities\") pod \"29574742-75c9-4047-9489-1fb591f673d1\" (UID: \"29574742-75c9-4047-9489-1fb591f673d1\") " Nov 25 10:00:32 crc kubenswrapper[4932]: I1125 10:00:32.976214 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrmnf\" (UniqueName: \"kubernetes.io/projected/29574742-75c9-4047-9489-1fb591f673d1-kube-api-access-rrmnf\") pod \"29574742-75c9-4047-9489-1fb591f673d1\" (UID: \"29574742-75c9-4047-9489-1fb591f673d1\") " Nov 25 10:00:32 crc kubenswrapper[4932]: I1125 10:00:32.977399 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29574742-75c9-4047-9489-1fb591f673d1-utilities" (OuterVolumeSpecName: "utilities") pod "29574742-75c9-4047-9489-1fb591f673d1" (UID: "29574742-75c9-4047-9489-1fb591f673d1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:32 crc kubenswrapper[4932]: I1125 10:00:32.976318 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29574742-75c9-4047-9489-1fb591f673d1-catalog-content\") pod \"29574742-75c9-4047-9489-1fb591f673d1\" (UID: \"29574742-75c9-4047-9489-1fb591f673d1\") " Nov 25 10:00:32 crc kubenswrapper[4932]: I1125 10:00:32.978801 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29574742-75c9-4047-9489-1fb591f673d1-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:32 crc kubenswrapper[4932]: I1125 10:00:32.985687 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29574742-75c9-4047-9489-1fb591f673d1-kube-api-access-rrmnf" (OuterVolumeSpecName: "kube-api-access-rrmnf") pod "29574742-75c9-4047-9489-1fb591f673d1" (UID: "29574742-75c9-4047-9489-1fb591f673d1"). InnerVolumeSpecName "kube-api-access-rrmnf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.079353 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrmnf\" (UniqueName: \"kubernetes.io/projected/29574742-75c9-4047-9489-1fb591f673d1-kube-api-access-rrmnf\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.433029 4932 generic.go:334] "Generic (PLEG): container finished" podID="29574742-75c9-4047-9489-1fb591f673d1" containerID="1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea" exitCode=0 Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.433120 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5pnq4" event={"ID":"29574742-75c9-4047-9489-1fb591f673d1","Type":"ContainerDied","Data":"1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea"} Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.433631 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5pnq4" event={"ID":"29574742-75c9-4047-9489-1fb591f673d1","Type":"ContainerDied","Data":"c83fb9a5d6bc765fe64bea271252f697d08c64fd30b13a6ea93deb289e97fe72"} Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.433681 4932 scope.go:117] "RemoveContainer" containerID="1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea" Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.433153 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5pnq4" Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.466229 4932 scope.go:117] "RemoveContainer" containerID="561f62647233a1d677b2d0185406932d3a7c97bca753050084901923578ea285" Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.486471 4932 scope.go:117] "RemoveContainer" containerID="4f5291fee6db424bd6f87d1cb600b092cd268527931583ea7124763569114e38" Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.511807 4932 scope.go:117] "RemoveContainer" containerID="1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea" Nov 25 10:00:33 crc kubenswrapper[4932]: E1125 10:00:33.512772 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea\": container with ID starting with 1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea not found: ID does not exist" containerID="1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea" Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.512845 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea"} err="failed to get container status \"1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea\": rpc error: code = NotFound desc = could not find container \"1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea\": container with ID starting with 1fe6a088518b0c3490bb1c62f78e45c07d7d602cdb48372c4d1fd3b2e35c65ea not found: ID does not exist" Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.512885 4932 scope.go:117] "RemoveContainer" containerID="561f62647233a1d677b2d0185406932d3a7c97bca753050084901923578ea285" Nov 25 10:00:33 crc kubenswrapper[4932]: E1125 10:00:33.513308 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not 
find container \"561f62647233a1d677b2d0185406932d3a7c97bca753050084901923578ea285\": container with ID starting with 561f62647233a1d677b2d0185406932d3a7c97bca753050084901923578ea285 not found: ID does not exist" containerID="561f62647233a1d677b2d0185406932d3a7c97bca753050084901923578ea285" Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.513364 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"561f62647233a1d677b2d0185406932d3a7c97bca753050084901923578ea285"} err="failed to get container status \"561f62647233a1d677b2d0185406932d3a7c97bca753050084901923578ea285\": rpc error: code = NotFound desc = could not find container \"561f62647233a1d677b2d0185406932d3a7c97bca753050084901923578ea285\": container with ID starting with 561f62647233a1d677b2d0185406932d3a7c97bca753050084901923578ea285 not found: ID does not exist" Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.513404 4932 scope.go:117] "RemoveContainer" containerID="4f5291fee6db424bd6f87d1cb600b092cd268527931583ea7124763569114e38" Nov 25 10:00:33 crc kubenswrapper[4932]: E1125 10:00:33.513844 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f5291fee6db424bd6f87d1cb600b092cd268527931583ea7124763569114e38\": container with ID starting with 4f5291fee6db424bd6f87d1cb600b092cd268527931583ea7124763569114e38 not found: ID does not exist" containerID="4f5291fee6db424bd6f87d1cb600b092cd268527931583ea7124763569114e38" Nov 25 10:00:33 crc kubenswrapper[4932]: I1125 10:00:33.513880 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f5291fee6db424bd6f87d1cb600b092cd268527931583ea7124763569114e38"} err="failed to get container status \"4f5291fee6db424bd6f87d1cb600b092cd268527931583ea7124763569114e38\": rpc error: code = NotFound desc = could not find container \"4f5291fee6db424bd6f87d1cb600b092cd268527931583ea7124763569114e38\": container with ID starting with 4f5291fee6db424bd6f87d1cb600b092cd268527931583ea7124763569114e38 not found: ID does not exist" Nov 25 10:00:34 crc kubenswrapper[4932]: I1125 10:00:34.386067 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29574742-75c9-4047-9489-1fb591f673d1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "29574742-75c9-4047-9489-1fb591f673d1" (UID: "29574742-75c9-4047-9489-1fb591f673d1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:00:34 crc kubenswrapper[4932]: I1125 10:00:34.397880 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29574742-75c9-4047-9489-1fb591f673d1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:00:34 crc kubenswrapper[4932]: I1125 10:00:34.673742 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5pnq4"] Nov 25 10:00:34 crc kubenswrapper[4932]: I1125 10:00:34.682074 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5pnq4"] Nov 25 10:00:36 crc kubenswrapper[4932]: I1125 10:00:36.615036 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29574742-75c9-4047-9489-1fb591f673d1" path="/var/lib/kubelet/pods/29574742-75c9-4047-9489-1fb591f673d1/volumes" Nov 25 10:00:37 crc kubenswrapper[4932]: I1125 10:00:37.181334 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:00:37 crc kubenswrapper[4932]: I1125 10:00:37.181425 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:01:07 crc kubenswrapper[4932]: I1125 10:01:07.181452 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:01:07 crc kubenswrapper[4932]: I1125 10:01:07.182458 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:01:07 crc kubenswrapper[4932]: I1125 10:01:07.182537 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 10:01:07 crc kubenswrapper[4932]: I1125 10:01:07.183566 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:01:07 crc kubenswrapper[4932]: I1125 10:01:07.183653 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" gracePeriod=600 Nov 25 10:01:07 crc kubenswrapper[4932]: E1125 10:01:07.324022 4932 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:01:07 crc kubenswrapper[4932]: I1125 10:01:07.728648 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" exitCode=0 Nov 25 10:01:07 crc kubenswrapper[4932]: I1125 10:01:07.728709 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94"} Nov 25 10:01:07 crc kubenswrapper[4932]: I1125 10:01:07.728755 4932 scope.go:117] "RemoveContainer" containerID="1e69d2d7b067068ef393101ca1c3436cce542093894f5fecc6c1435331f4ff20" Nov 25 10:01:07 crc kubenswrapper[4932]: I1125 10:01:07.729295 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:01:07 crc kubenswrapper[4932]: E1125 10:01:07.729540 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:01:18 crc kubenswrapper[4932]: I1125 10:01:18.606428 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:01:18 crc kubenswrapper[4932]: E1125 10:01:18.607568 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:01:29 crc kubenswrapper[4932]: I1125 10:01:29.606169 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:01:29 crc kubenswrapper[4932]: E1125 10:01:29.607119 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:01:42 crc kubenswrapper[4932]: I1125 10:01:42.606221 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:01:42 crc kubenswrapper[4932]: E1125 10:01:42.607219 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:01:53 crc kubenswrapper[4932]: I1125 10:01:53.606290 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:01:53 crc kubenswrapper[4932]: E1125 10:01:53.607760 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:02:04 crc kubenswrapper[4932]: I1125 10:02:04.607027 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:02:04 crc kubenswrapper[4932]: E1125 10:02:04.608137 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:02:15 crc kubenswrapper[4932]: I1125 10:02:15.605793 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:02:15 crc kubenswrapper[4932]: E1125 10:02:15.606750 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:02:29 crc kubenswrapper[4932]: I1125 10:02:29.605407 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:02:29 crc kubenswrapper[4932]: E1125 10:02:29.606311 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.082178 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2hllr"] Nov 25 10:02:36 crc kubenswrapper[4932]: E1125 10:02:36.083346 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29574742-75c9-4047-9489-1fb591f673d1" containerName="registry-server" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.083364 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="29574742-75c9-4047-9489-1fb591f673d1" containerName="registry-server" Nov 25 
Nov 25 10:02:36 crc kubenswrapper[4932]: E1125 10:02:36.083380 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29574742-75c9-4047-9489-1fb591f673d1" containerName="extract-content"
Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.083390 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="29574742-75c9-4047-9489-1fb591f673d1" containerName="extract-content"
Nov 25 10:02:36 crc kubenswrapper[4932]: E1125 10:02:36.083407 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26bb7515-cbe4-4f78-b161-c6a6381c5d4e" containerName="extract-utilities"
Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.083416 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="26bb7515-cbe4-4f78-b161-c6a6381c5d4e" containerName="extract-utilities"
Nov 25 10:02:36 crc kubenswrapper[4932]: E1125 10:02:36.083446 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26bb7515-cbe4-4f78-b161-c6a6381c5d4e" containerName="extract-content"
Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.083454 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="26bb7515-cbe4-4f78-b161-c6a6381c5d4e" containerName="extract-content"
Nov 25 10:02:36 crc kubenswrapper[4932]: E1125 10:02:36.083477 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29574742-75c9-4047-9489-1fb591f673d1" containerName="extract-utilities"
Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.083487 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="29574742-75c9-4047-9489-1fb591f673d1" containerName="extract-utilities"
Nov 25 10:02:36 crc kubenswrapper[4932]: E1125 10:02:36.083503 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26bb7515-cbe4-4f78-b161-c6a6381c5d4e" containerName="registry-server"
Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.083512 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="26bb7515-cbe4-4f78-b161-c6a6381c5d4e" containerName="registry-server"
Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.083707 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="26bb7515-cbe4-4f78-b161-c6a6381c5d4e" containerName="registry-server"
Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.083726 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="29574742-75c9-4047-9489-1fb591f673d1" containerName="registry-server"
Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.085183 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.098822 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2hllr"] Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.127819 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-utilities\") pod \"certified-operators-2hllr\" (UID: \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\") " pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.127893 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwb7k\" (UniqueName: \"kubernetes.io/projected/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-kube-api-access-lwb7k\") pod \"certified-operators-2hllr\" (UID: \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\") " pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.127953 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-catalog-content\") pod \"certified-operators-2hllr\" (UID: \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\") " pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.229365 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-utilities\") pod \"certified-operators-2hllr\" (UID: \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\") " pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.229438 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwb7k\" (UniqueName: \"kubernetes.io/projected/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-kube-api-access-lwb7k\") pod \"certified-operators-2hllr\" (UID: \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\") " pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.229464 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-catalog-content\") pod \"certified-operators-2hllr\" (UID: \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\") " pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.230090 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-utilities\") pod \"certified-operators-2hllr\" (UID: \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\") " pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.230279 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-catalog-content\") pod \"certified-operators-2hllr\" (UID: \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\") " pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.257298 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lwb7k\" (UniqueName: \"kubernetes.io/projected/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-kube-api-access-lwb7k\") pod \"certified-operators-2hllr\" (UID: \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\") " pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.415498 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:36 crc kubenswrapper[4932]: I1125 10:02:36.930388 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2hllr"] Nov 25 10:02:37 crc kubenswrapper[4932]: I1125 10:02:37.529935 4932 generic.go:334] "Generic (PLEG): container finished" podID="8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" containerID="f4bf21b3161fe6b043e978e02931c5c1a329d04cfaf2804ccd6a9fe7a9e423d6" exitCode=0 Nov 25 10:02:37 crc kubenswrapper[4932]: I1125 10:02:37.530411 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hllr" event={"ID":"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560","Type":"ContainerDied","Data":"f4bf21b3161fe6b043e978e02931c5c1a329d04cfaf2804ccd6a9fe7a9e423d6"} Nov 25 10:02:37 crc kubenswrapper[4932]: I1125 10:02:37.531369 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hllr" event={"ID":"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560","Type":"ContainerStarted","Data":"780858367b4707cb152e2ffd8e8d98ddee169154250d42e458b9c995e76e844a"} Nov 25 10:02:38 crc kubenswrapper[4932]: I1125 10:02:38.545619 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hllr" event={"ID":"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560","Type":"ContainerStarted","Data":"3a4afd9eb4ad089b3ee90cf0fcdc57a05663d2472d65c98f9b170296f805171f"} Nov 25 10:02:39 crc kubenswrapper[4932]: I1125 10:02:39.564451 4932 generic.go:334] "Generic (PLEG): container finished" podID="8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" containerID="3a4afd9eb4ad089b3ee90cf0fcdc57a05663d2472d65c98f9b170296f805171f" exitCode=0 Nov 25 10:02:39 crc kubenswrapper[4932]: I1125 10:02:39.564553 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hllr" event={"ID":"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560","Type":"ContainerDied","Data":"3a4afd9eb4ad089b3ee90cf0fcdc57a05663d2472d65c98f9b170296f805171f"} Nov 25 10:02:40 crc kubenswrapper[4932]: I1125 10:02:40.577380 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hllr" event={"ID":"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560","Type":"ContainerStarted","Data":"2af8921095c6c7de8d2ce13d20dd60219f4aad2b3de1a2ea0bf8debe1ff98160"} Nov 25 10:02:40 crc kubenswrapper[4932]: I1125 10:02:40.607555 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2hllr" podStartSLOduration=2.134410854 podStartE2EDuration="4.607520958s" podCreationTimestamp="2025-11-25 10:02:36 +0000 UTC" firstStartedPulling="2025-11-25 10:02:37.532019711 +0000 UTC m=+4417.658049284" lastFinishedPulling="2025-11-25 10:02:40.005129825 +0000 UTC m=+4420.131159388" observedRunningTime="2025-11-25 10:02:40.60410664 +0000 UTC m=+4420.730136243" watchObservedRunningTime="2025-11-25 10:02:40.607520958 +0000 UTC m=+4420.733550541" Nov 25 10:02:42 crc kubenswrapper[4932]: I1125 10:02:42.607376 4932 scope.go:117] "RemoveContainer" 
containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:02:42 crc kubenswrapper[4932]: E1125 10:02:42.607842 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:02:46 crc kubenswrapper[4932]: I1125 10:02:46.415737 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:46 crc kubenswrapper[4932]: I1125 10:02:46.416554 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:46 crc kubenswrapper[4932]: I1125 10:02:46.461029 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:46 crc kubenswrapper[4932]: I1125 10:02:46.672731 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:46 crc kubenswrapper[4932]: I1125 10:02:46.719384 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2hllr"] Nov 25 10:02:48 crc kubenswrapper[4932]: I1125 10:02:48.648966 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2hllr" podUID="8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" containerName="registry-server" containerID="cri-o://2af8921095c6c7de8d2ce13d20dd60219f4aad2b3de1a2ea0bf8debe1ff98160" gracePeriod=2 Nov 25 10:02:49 crc kubenswrapper[4932]: I1125 10:02:49.664853 4932 generic.go:334] "Generic (PLEG): container finished" podID="8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" containerID="2af8921095c6c7de8d2ce13d20dd60219f4aad2b3de1a2ea0bf8debe1ff98160" exitCode=0 Nov 25 10:02:49 crc kubenswrapper[4932]: I1125 10:02:49.664916 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hllr" event={"ID":"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560","Type":"ContainerDied","Data":"2af8921095c6c7de8d2ce13d20dd60219f4aad2b3de1a2ea0bf8debe1ff98160"} Nov 25 10:02:49 crc kubenswrapper[4932]: I1125 10:02:49.719033 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:49 crc kubenswrapper[4932]: I1125 10:02:49.779770 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-catalog-content\") pod \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\" (UID: \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\") " Nov 25 10:02:49 crc kubenswrapper[4932]: I1125 10:02:49.780372 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-utilities\") pod \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\" (UID: \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\") " Nov 25 10:02:49 crc kubenswrapper[4932]: I1125 10:02:49.780545 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwb7k\" (UniqueName: \"kubernetes.io/projected/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-kube-api-access-lwb7k\") pod \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\" (UID: \"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560\") " Nov 25 10:02:49 crc kubenswrapper[4932]: I1125 10:02:49.785781 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-utilities" (OuterVolumeSpecName: "utilities") pod "8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" (UID: "8a52ee60-3e77-4b98-a2ea-5dadbfbfb560"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:02:49 crc kubenswrapper[4932]: I1125 10:02:49.797676 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-kube-api-access-lwb7k" (OuterVolumeSpecName: "kube-api-access-lwb7k") pod "8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" (UID: "8a52ee60-3e77-4b98-a2ea-5dadbfbfb560"). InnerVolumeSpecName "kube-api-access-lwb7k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:02:49 crc kubenswrapper[4932]: I1125 10:02:49.834017 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" (UID: "8a52ee60-3e77-4b98-a2ea-5dadbfbfb560"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:02:49 crc kubenswrapper[4932]: I1125 10:02:49.881681 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:49 crc kubenswrapper[4932]: I1125 10:02:49.881711 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwb7k\" (UniqueName: \"kubernetes.io/projected/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-kube-api-access-lwb7k\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:49 crc kubenswrapper[4932]: I1125 10:02:49.881720 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:02:50 crc kubenswrapper[4932]: I1125 10:02:50.676118 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2hllr" event={"ID":"8a52ee60-3e77-4b98-a2ea-5dadbfbfb560","Type":"ContainerDied","Data":"780858367b4707cb152e2ffd8e8d98ddee169154250d42e458b9c995e76e844a"} Nov 25 10:02:50 crc kubenswrapper[4932]: I1125 10:02:50.676179 4932 scope.go:117] "RemoveContainer" containerID="2af8921095c6c7de8d2ce13d20dd60219f4aad2b3de1a2ea0bf8debe1ff98160" Nov 25 10:02:50 crc kubenswrapper[4932]: I1125 10:02:50.676224 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2hllr" Nov 25 10:02:50 crc kubenswrapper[4932]: I1125 10:02:50.701901 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2hllr"] Nov 25 10:02:50 crc kubenswrapper[4932]: I1125 10:02:50.708602 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2hllr"] Nov 25 10:02:50 crc kubenswrapper[4932]: I1125 10:02:50.710205 4932 scope.go:117] "RemoveContainer" containerID="3a4afd9eb4ad089b3ee90cf0fcdc57a05663d2472d65c98f9b170296f805171f" Nov 25 10:02:50 crc kubenswrapper[4932]: I1125 10:02:50.745802 4932 scope.go:117] "RemoveContainer" containerID="f4bf21b3161fe6b043e978e02931c5c1a329d04cfaf2804ccd6a9fe7a9e423d6" Nov 25 10:02:52 crc kubenswrapper[4932]: I1125 10:02:52.643801 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" path="/var/lib/kubelet/pods/8a52ee60-3e77-4b98-a2ea-5dadbfbfb560/volumes" Nov 25 10:02:55 crc kubenswrapper[4932]: I1125 10:02:55.607288 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:02:55 crc kubenswrapper[4932]: E1125 10:02:55.609683 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:03:07 crc kubenswrapper[4932]: I1125 10:03:07.605706 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:03:07 crc kubenswrapper[4932]: E1125 10:03:07.606710 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
Nov 25 10:03:07 crc kubenswrapper[4932]: E1125 10:03:07.606710 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:03:22 crc kubenswrapper[4932]: I1125 10:03:22.606617 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94"
Nov 25 10:03:22 crc kubenswrapper[4932]: E1125 10:03:22.607569 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:03:35 crc kubenswrapper[4932]: I1125 10:03:35.607302 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94"
Nov 25 10:03:35 crc kubenswrapper[4932]: E1125 10:03:35.608544 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:03:49 crc kubenswrapper[4932]: I1125 10:03:49.606607 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94"
Nov 25 10:03:49 crc kubenswrapper[4932]: E1125 10:03:49.608077 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:04:04 crc kubenswrapper[4932]: I1125 10:04:04.606727 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94"
Nov 25 10:04:04 crc kubenswrapper[4932]: E1125 10:04:04.607959 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:04:18 crc kubenswrapper[4932]: I1125 10:04:18.606356 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94"
Nov 25 10:04:18 crc kubenswrapper[4932]: E1125 10:04:18.607715 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:04:30 crc kubenswrapper[4932]: I1125 10:04:30.611523 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:04:30 crc kubenswrapper[4932]: E1125 10:04:30.612598 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:04:42 crc kubenswrapper[4932]: I1125 10:04:42.607031 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:04:42 crc kubenswrapper[4932]: E1125 10:04:42.608003 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:04:57 crc kubenswrapper[4932]: I1125 10:04:57.605979 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:04:57 crc kubenswrapper[4932]: E1125 10:04:57.606936 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:05:11 crc kubenswrapper[4932]: I1125 10:05:11.607019 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:05:11 crc kubenswrapper[4932]: E1125 10:05:11.608348 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:05:24 crc kubenswrapper[4932]: I1125 10:05:24.605848 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:05:24 crc kubenswrapper[4932]: E1125 10:05:24.607533 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" 
podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:05:37 crc kubenswrapper[4932]: I1125 10:05:37.606756 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:05:37 crc kubenswrapper[4932]: E1125 10:05:37.608026 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:05:48 crc kubenswrapper[4932]: I1125 10:05:48.606389 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:05:48 crc kubenswrapper[4932]: E1125 10:05:48.607615 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:06:01 crc kubenswrapper[4932]: I1125 10:06:01.606538 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:06:01 crc kubenswrapper[4932]: E1125 10:06:01.612695 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:06:15 crc kubenswrapper[4932]: I1125 10:06:15.607044 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94" Nov 25 10:06:16 crc kubenswrapper[4932]: I1125 10:06:16.656546 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"b1eccdea28624af1d89e99001e8aa1973651621a0b0dbb3c72f710d48119bf2d"} Nov 25 10:08:37 crc kubenswrapper[4932]: I1125 10:08:37.181163 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:08:37 crc kubenswrapper[4932]: I1125 10:08:37.181766 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:09:07 crc kubenswrapper[4932]: I1125 10:09:07.181466 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:09:07 crc kubenswrapper[4932]: I1125 10:09:07.182348 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:09:37 crc kubenswrapper[4932]: I1125 10:09:37.181226 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:09:37 crc kubenswrapper[4932]: I1125 10:09:37.181673 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:09:37 crc kubenswrapper[4932]: I1125 10:09:37.181716 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh"
Nov 25 10:09:37 crc kubenswrapper[4932]: I1125 10:09:37.182149 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b1eccdea28624af1d89e99001e8aa1973651621a0b0dbb3c72f710d48119bf2d"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 10:09:37 crc kubenswrapper[4932]: I1125 10:09:37.182212 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://b1eccdea28624af1d89e99001e8aa1973651621a0b0dbb3c72f710d48119bf2d" gracePeriod=600
Nov 25 10:09:37 crc kubenswrapper[4932]: I1125 10:09:37.532566 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="b1eccdea28624af1d89e99001e8aa1973651621a0b0dbb3c72f710d48119bf2d" exitCode=0
Nov 25 10:09:37 crc kubenswrapper[4932]: I1125 10:09:37.532853 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"b1eccdea28624af1d89e99001e8aa1973651621a0b0dbb3c72f710d48119bf2d"}
Nov 25 10:09:37 crc kubenswrapper[4932]: I1125 10:09:37.533585 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"}
Nov 25 10:09:37 crc kubenswrapper[4932]: I1125 10:09:37.533738 4932 scope.go:117] "RemoveContainer" containerID="86072dd9eed006dfe144816e1c9eb4a93f3a0128a305b6e8d826a617a81edf94"
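[editor's note] The liveness failures above arrive 30 seconds apart, and after the third one the kubelet kills machine-config-daemon with its 600s grace period and restarts it. A minimal Go sketch of such a probe loop; the URL and the 30s period are read off the log, while failureThreshold=3 is an assumption consistent with three failures preceding the restart (it is not stated in the log):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	const (
		url              = "http://127.0.0.1:8798/health" // endpoint from the log
		period           = 30 * time.Second               // spacing of the logged failures
		failureThreshold = 3                              // assumed, not in the log
	)
	failures := 0
	for {
		resp, err := http.Get(url)
		healthy := err == nil && resp.StatusCode < 400
		if resp != nil {
			resp.Body.Close()
		}
		if healthy {
			failures = 0 // consecutive-failure counter resets on success
		} else {
			failures++
			fmt.Printf("Probe failed (%d/%d): %v\n", failures, failureThreshold, err)
		}
		if failures >= failureThreshold {
			// The kubelet would now send SIGTERM, then SIGKILL once the
			// 600s grace period expires, and restart the container.
			fmt.Println("liveness unhealthy: killing container with grace period 600s")
			return
		}
		time.Sleep(period)
	}
}
```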
pods=["crc-storage/crc-storage-crc-hjclb"] Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.030186 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-hjclb"] Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.151230 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-6zdxq"] Nov 25 10:10:00 crc kubenswrapper[4932]: E1125 10:10:00.151548 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" containerName="extract-content" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.151564 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" containerName="extract-content" Nov 25 10:10:00 crc kubenswrapper[4932]: E1125 10:10:00.151572 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" containerName="registry-server" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.151578 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" containerName="registry-server" Nov 25 10:10:00 crc kubenswrapper[4932]: E1125 10:10:00.151594 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" containerName="extract-utilities" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.151601 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" containerName="extract-utilities" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.151763 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a52ee60-3e77-4b98-a2ea-5dadbfbfb560" containerName="registry-server" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.152265 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.155328 4932 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-vqtxm" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.155689 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.156393 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.157303 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.160942 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-6zdxq"] Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.204760 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6lbk\" (UniqueName: \"kubernetes.io/projected/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-kube-api-access-l6lbk\") pod \"crc-storage-crc-6zdxq\" (UID: \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\") " pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.204862 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-crc-storage\") pod \"crc-storage-crc-6zdxq\" (UID: \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\") " pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.205067 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-node-mnt\") pod \"crc-storage-crc-6zdxq\" (UID: \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\") " pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.306526 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-crc-storage\") pod \"crc-storage-crc-6zdxq\" (UID: \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\") " pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.306680 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-node-mnt\") pod \"crc-storage-crc-6zdxq\" (UID: \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\") " pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.306741 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6lbk\" (UniqueName: \"kubernetes.io/projected/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-kube-api-access-l6lbk\") pod \"crc-storage-crc-6zdxq\" (UID: \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\") " pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.307093 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-node-mnt\") pod \"crc-storage-crc-6zdxq\" (UID: \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\") " 
pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.307910 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-crc-storage\") pod \"crc-storage-crc-6zdxq\" (UID: \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\") " pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.330942 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6lbk\" (UniqueName: \"kubernetes.io/projected/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-kube-api-access-l6lbk\") pod \"crc-storage-crc-6zdxq\" (UID: \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\") " pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.471176 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.617780 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20306d8c-9042-4cc0-9957-6b45a2c58762" path="/var/lib/kubelet/pods/20306d8c-9042-4cc0-9957-6b45a2c58762/volumes" Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.924095 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-6zdxq"] Nov 25 10:10:00 crc kubenswrapper[4932]: I1125 10:10:00.928448 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:10:01 crc kubenswrapper[4932]: I1125 10:10:01.760423 4932 generic.go:334] "Generic (PLEG): container finished" podID="b75193ef-47d5-428d-a0b7-1f08f68b1eb8" containerID="7667aedad8afe934f6d67cb341e47ee4b49f6256a77beb6a1ee682d4be1c175e" exitCode=0 Nov 25 10:10:01 crc kubenswrapper[4932]: I1125 10:10:01.760508 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-6zdxq" event={"ID":"b75193ef-47d5-428d-a0b7-1f08f68b1eb8","Type":"ContainerDied","Data":"7667aedad8afe934f6d67cb341e47ee4b49f6256a77beb6a1ee682d4be1c175e"} Nov 25 10:10:01 crc kubenswrapper[4932]: I1125 10:10:01.761416 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-6zdxq" event={"ID":"b75193ef-47d5-428d-a0b7-1f08f68b1eb8","Type":"ContainerStarted","Data":"c4423a1d3405e932a68adf4ebe4ca0637eff027d49fcc1dc2067a866b9f3c639"} Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.064631 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.162918 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-node-mnt\") pod \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\" (UID: \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\") " Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.163053 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6lbk\" (UniqueName: \"kubernetes.io/projected/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-kube-api-access-l6lbk\") pod \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\" (UID: \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\") " Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.163106 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "b75193ef-47d5-428d-a0b7-1f08f68b1eb8" (UID: "b75193ef-47d5-428d-a0b7-1f08f68b1eb8"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.163252 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-crc-storage\") pod \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\" (UID: \"b75193ef-47d5-428d-a0b7-1f08f68b1eb8\") " Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.163986 4932 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.171526 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-kube-api-access-l6lbk" (OuterVolumeSpecName: "kube-api-access-l6lbk") pod "b75193ef-47d5-428d-a0b7-1f08f68b1eb8" (UID: "b75193ef-47d5-428d-a0b7-1f08f68b1eb8"). InnerVolumeSpecName "kube-api-access-l6lbk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.190528 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "b75193ef-47d5-428d-a0b7-1f08f68b1eb8" (UID: "b75193ef-47d5-428d-a0b7-1f08f68b1eb8"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.264752 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6lbk\" (UniqueName: \"kubernetes.io/projected/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-kube-api-access-l6lbk\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.264812 4932 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b75193ef-47d5-428d-a0b7-1f08f68b1eb8-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.794561 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-6zdxq" event={"ID":"b75193ef-47d5-428d-a0b7-1f08f68b1eb8","Type":"ContainerDied","Data":"c4423a1d3405e932a68adf4ebe4ca0637eff027d49fcc1dc2067a866b9f3c639"} Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.794611 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c4423a1d3405e932a68adf4ebe4ca0637eff027d49fcc1dc2067a866b9f3c639" Nov 25 10:10:03 crc kubenswrapper[4932]: I1125 10:10:03.794645 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-6zdxq" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.430353 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-6zdxq"] Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.435327 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-6zdxq"] Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.583915 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-dptsx"] Nov 25 10:10:05 crc kubenswrapper[4932]: E1125 10:10:05.584303 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b75193ef-47d5-428d-a0b7-1f08f68b1eb8" containerName="storage" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.584328 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b75193ef-47d5-428d-a0b7-1f08f68b1eb8" containerName="storage" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.584489 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b75193ef-47d5-428d-a0b7-1f08f68b1eb8" containerName="storage" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.584969 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.586838 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.587110 4932 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-vqtxm" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.587726 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.588094 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.601605 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-dptsx"] Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.744326 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpkwt\" (UniqueName: \"kubernetes.io/projected/03e2175c-7dd7-40ef-803a-b6ff3b08563c-kube-api-access-kpkwt\") pod \"crc-storage-crc-dptsx\" (UID: \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\") " pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.744427 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/03e2175c-7dd7-40ef-803a-b6ff3b08563c-crc-storage\") pod \"crc-storage-crc-dptsx\" (UID: \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\") " pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.744448 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/03e2175c-7dd7-40ef-803a-b6ff3b08563c-node-mnt\") pod \"crc-storage-crc-dptsx\" (UID: \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\") " pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.845648 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/03e2175c-7dd7-40ef-803a-b6ff3b08563c-crc-storage\") pod \"crc-storage-crc-dptsx\" (UID: \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\") " pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.845710 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/03e2175c-7dd7-40ef-803a-b6ff3b08563c-node-mnt\") pod \"crc-storage-crc-dptsx\" (UID: \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\") " pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.845836 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpkwt\" (UniqueName: \"kubernetes.io/projected/03e2175c-7dd7-40ef-803a-b6ff3b08563c-kube-api-access-kpkwt\") pod \"crc-storage-crc-dptsx\" (UID: \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\") " pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.846397 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/03e2175c-7dd7-40ef-803a-b6ff3b08563c-node-mnt\") pod \"crc-storage-crc-dptsx\" (UID: \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\") " 
pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.846571 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/03e2175c-7dd7-40ef-803a-b6ff3b08563c-crc-storage\") pod \"crc-storage-crc-dptsx\" (UID: \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\") " pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.866833 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpkwt\" (UniqueName: \"kubernetes.io/projected/03e2175c-7dd7-40ef-803a-b6ff3b08563c-kube-api-access-kpkwt\") pod \"crc-storage-crc-dptsx\" (UID: \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\") " pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:05 crc kubenswrapper[4932]: I1125 10:10:05.911081 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:06 crc kubenswrapper[4932]: I1125 10:10:06.361632 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-dptsx"] Nov 25 10:10:06 crc kubenswrapper[4932]: W1125 10:10:06.370734 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03e2175c_7dd7_40ef_803a_b6ff3b08563c.slice/crio-4baa78727b81f8cac0e2cfd1b9ae9403c29ca3d1df7f9fe8d444a27b2487bcfd WatchSource:0}: Error finding container 4baa78727b81f8cac0e2cfd1b9ae9403c29ca3d1df7f9fe8d444a27b2487bcfd: Status 404 returned error can't find the container with id 4baa78727b81f8cac0e2cfd1b9ae9403c29ca3d1df7f9fe8d444a27b2487bcfd Nov 25 10:10:06 crc kubenswrapper[4932]: I1125 10:10:06.617712 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b75193ef-47d5-428d-a0b7-1f08f68b1eb8" path="/var/lib/kubelet/pods/b75193ef-47d5-428d-a0b7-1f08f68b1eb8/volumes" Nov 25 10:10:06 crc kubenswrapper[4932]: I1125 10:10:06.822483 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-dptsx" event={"ID":"03e2175c-7dd7-40ef-803a-b6ff3b08563c","Type":"ContainerStarted","Data":"4baa78727b81f8cac0e2cfd1b9ae9403c29ca3d1df7f9fe8d444a27b2487bcfd"} Nov 25 10:10:07 crc kubenswrapper[4932]: I1125 10:10:07.835666 4932 generic.go:334] "Generic (PLEG): container finished" podID="03e2175c-7dd7-40ef-803a-b6ff3b08563c" containerID="c87eb8925734af9d7961bbdf355574082ed5801c387431aa370e55529c3cd825" exitCode=0 Nov 25 10:10:07 crc kubenswrapper[4932]: I1125 10:10:07.835786 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-dptsx" event={"ID":"03e2175c-7dd7-40ef-803a-b6ff3b08563c","Type":"ContainerDied","Data":"c87eb8925734af9d7961bbdf355574082ed5801c387431aa370e55529c3cd825"} Nov 25 10:10:08 crc kubenswrapper[4932]: I1125 10:10:08.638567 4932 scope.go:117] "RemoveContainer" containerID="89c42f30eb26d38b0d42182d861713eb3e568b7805d21757f8c81cb7d45f640f" Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.177965 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.304042 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/03e2175c-7dd7-40ef-803a-b6ff3b08563c-node-mnt\") pod \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\" (UID: \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\") " Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.304165 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/03e2175c-7dd7-40ef-803a-b6ff3b08563c-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "03e2175c-7dd7-40ef-803a-b6ff3b08563c" (UID: "03e2175c-7dd7-40ef-803a-b6ff3b08563c"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.304298 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpkwt\" (UniqueName: \"kubernetes.io/projected/03e2175c-7dd7-40ef-803a-b6ff3b08563c-kube-api-access-kpkwt\") pod \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\" (UID: \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\") " Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.304397 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/03e2175c-7dd7-40ef-803a-b6ff3b08563c-crc-storage\") pod \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\" (UID: \"03e2175c-7dd7-40ef-803a-b6ff3b08563c\") " Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.304851 4932 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/03e2175c-7dd7-40ef-803a-b6ff3b08563c-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.311467 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03e2175c-7dd7-40ef-803a-b6ff3b08563c-kube-api-access-kpkwt" (OuterVolumeSpecName: "kube-api-access-kpkwt") pod "03e2175c-7dd7-40ef-803a-b6ff3b08563c" (UID: "03e2175c-7dd7-40ef-803a-b6ff3b08563c"). InnerVolumeSpecName "kube-api-access-kpkwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.338664 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03e2175c-7dd7-40ef-803a-b6ff3b08563c-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "03e2175c-7dd7-40ef-803a-b6ff3b08563c" (UID: "03e2175c-7dd7-40ef-803a-b6ff3b08563c"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.406680 4932 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/03e2175c-7dd7-40ef-803a-b6ff3b08563c-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.406723 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpkwt\" (UniqueName: \"kubernetes.io/projected/03e2175c-7dd7-40ef-803a-b6ff3b08563c-kube-api-access-kpkwt\") on node \"crc\" DevicePath \"\"" Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.865099 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-dptsx" event={"ID":"03e2175c-7dd7-40ef-803a-b6ff3b08563c","Type":"ContainerDied","Data":"4baa78727b81f8cac0e2cfd1b9ae9403c29ca3d1df7f9fe8d444a27b2487bcfd"} Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.865136 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4baa78727b81f8cac0e2cfd1b9ae9403c29ca3d1df7f9fe8d444a27b2487bcfd" Nov 25 10:10:09 crc kubenswrapper[4932]: I1125 10:10:09.865183 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-dptsx" Nov 25 10:10:56 crc kubenswrapper[4932]: I1125 10:10:56.798662 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-q7tvh"] Nov 25 10:10:56 crc kubenswrapper[4932]: E1125 10:10:56.799514 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03e2175c-7dd7-40ef-803a-b6ff3b08563c" containerName="storage" Nov 25 10:10:56 crc kubenswrapper[4932]: I1125 10:10:56.799530 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="03e2175c-7dd7-40ef-803a-b6ff3b08563c" containerName="storage" Nov 25 10:10:56 crc kubenswrapper[4932]: I1125 10:10:56.799692 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="03e2175c-7dd7-40ef-803a-b6ff3b08563c" containerName="storage" Nov 25 10:10:56 crc kubenswrapper[4932]: I1125 10:10:56.800942 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:10:56 crc kubenswrapper[4932]: I1125 10:10:56.813317 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q7tvh"] Nov 25 10:10:56 crc kubenswrapper[4932]: I1125 10:10:56.963834 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0167e71-186e-4b01-a699-65cdaab0e1de-utilities\") pod \"community-operators-q7tvh\" (UID: \"d0167e71-186e-4b01-a699-65cdaab0e1de\") " pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:10:56 crc kubenswrapper[4932]: I1125 10:10:56.963885 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9s7q\" (UniqueName: \"kubernetes.io/projected/d0167e71-186e-4b01-a699-65cdaab0e1de-kube-api-access-c9s7q\") pod \"community-operators-q7tvh\" (UID: \"d0167e71-186e-4b01-a699-65cdaab0e1de\") " pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:10:56 crc kubenswrapper[4932]: I1125 10:10:56.963934 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0167e71-186e-4b01-a699-65cdaab0e1de-catalog-content\") pod \"community-operators-q7tvh\" (UID: \"d0167e71-186e-4b01-a699-65cdaab0e1de\") " pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:10:57 crc kubenswrapper[4932]: I1125 10:10:57.065363 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0167e71-186e-4b01-a699-65cdaab0e1de-catalog-content\") pod \"community-operators-q7tvh\" (UID: \"d0167e71-186e-4b01-a699-65cdaab0e1de\") " pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:10:57 crc kubenswrapper[4932]: I1125 10:10:57.065782 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0167e71-186e-4b01-a699-65cdaab0e1de-utilities\") pod \"community-operators-q7tvh\" (UID: \"d0167e71-186e-4b01-a699-65cdaab0e1de\") " pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:10:57 crc kubenswrapper[4932]: I1125 10:10:57.065941 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9s7q\" (UniqueName: \"kubernetes.io/projected/d0167e71-186e-4b01-a699-65cdaab0e1de-kube-api-access-c9s7q\") pod \"community-operators-q7tvh\" (UID: \"d0167e71-186e-4b01-a699-65cdaab0e1de\") " pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:10:57 crc kubenswrapper[4932]: I1125 10:10:57.066328 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0167e71-186e-4b01-a699-65cdaab0e1de-utilities\") pod \"community-operators-q7tvh\" (UID: \"d0167e71-186e-4b01-a699-65cdaab0e1de\") " pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:10:57 crc kubenswrapper[4932]: I1125 10:10:57.066035 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0167e71-186e-4b01-a699-65cdaab0e1de-catalog-content\") pod \"community-operators-q7tvh\" (UID: \"d0167e71-186e-4b01-a699-65cdaab0e1de\") " pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:10:57 crc kubenswrapper[4932]: I1125 10:10:57.095966 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-c9s7q\" (UniqueName: \"kubernetes.io/projected/d0167e71-186e-4b01-a699-65cdaab0e1de-kube-api-access-c9s7q\") pod \"community-operators-q7tvh\" (UID: \"d0167e71-186e-4b01-a699-65cdaab0e1de\") " pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:10:57 crc kubenswrapper[4932]: I1125 10:10:57.124220 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:10:57 crc kubenswrapper[4932]: I1125 10:10:57.629341 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-q7tvh"] Nov 25 10:10:58 crc kubenswrapper[4932]: I1125 10:10:58.348782 4932 generic.go:334] "Generic (PLEG): container finished" podID="d0167e71-186e-4b01-a699-65cdaab0e1de" containerID="37a6f58652e5fc42c1b1278f7ad7780d5aebeb9b03647f32f8679a3261276079" exitCode=0 Nov 25 10:10:58 crc kubenswrapper[4932]: I1125 10:10:58.348887 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7tvh" event={"ID":"d0167e71-186e-4b01-a699-65cdaab0e1de","Type":"ContainerDied","Data":"37a6f58652e5fc42c1b1278f7ad7780d5aebeb9b03647f32f8679a3261276079"} Nov 25 10:10:58 crc kubenswrapper[4932]: I1125 10:10:58.349149 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7tvh" event={"ID":"d0167e71-186e-4b01-a699-65cdaab0e1de","Type":"ContainerStarted","Data":"cc605e3bc534c1253a4d5ac16f6130ce70cb471a5679d002ef20bafb807a9fe9"} Nov 25 10:10:59 crc kubenswrapper[4932]: I1125 10:10:59.367454 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7tvh" event={"ID":"d0167e71-186e-4b01-a699-65cdaab0e1de","Type":"ContainerStarted","Data":"174c2a29adda64ebabd57a126bc4b03c274fc9e1687fd400d434b0006be8a102"} Nov 25 10:11:00 crc kubenswrapper[4932]: I1125 10:11:00.378672 4932 generic.go:334] "Generic (PLEG): container finished" podID="d0167e71-186e-4b01-a699-65cdaab0e1de" containerID="174c2a29adda64ebabd57a126bc4b03c274fc9e1687fd400d434b0006be8a102" exitCode=0 Nov 25 10:11:00 crc kubenswrapper[4932]: I1125 10:11:00.378730 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7tvh" event={"ID":"d0167e71-186e-4b01-a699-65cdaab0e1de","Type":"ContainerDied","Data":"174c2a29adda64ebabd57a126bc4b03c274fc9e1687fd400d434b0006be8a102"} Nov 25 10:11:01 crc kubenswrapper[4932]: I1125 10:11:01.389101 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7tvh" event={"ID":"d0167e71-186e-4b01-a699-65cdaab0e1de","Type":"ContainerStarted","Data":"7c74fd1ad169bb51832677d1ea2275f471bafb1c43bd58ebb6fa6d936f97de5e"} Nov 25 10:11:01 crc kubenswrapper[4932]: I1125 10:11:01.417276 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-q7tvh" podStartSLOduration=2.806446282 podStartE2EDuration="5.417253699s" podCreationTimestamp="2025-11-25 10:10:56 +0000 UTC" firstStartedPulling="2025-11-25 10:10:58.350843584 +0000 UTC m=+4918.476873147" lastFinishedPulling="2025-11-25 10:11:00.961651001 +0000 UTC m=+4921.087680564" observedRunningTime="2025-11-25 10:11:01.412820721 +0000 UTC m=+4921.538850304" watchObservedRunningTime="2025-11-25 10:11:01.417253699 +0000 UTC m=+4921.543283262" Nov 25 10:11:07 crc kubenswrapper[4932]: I1125 10:11:07.124862 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:11:07 crc kubenswrapper[4932]: I1125 10:11:07.125362 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:11:07 crc kubenswrapper[4932]: I1125 10:11:07.181504 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:11:07 crc kubenswrapper[4932]: I1125 10:11:07.517295 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:11:07 crc kubenswrapper[4932]: I1125 10:11:07.581359 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q7tvh"] Nov 25 10:11:09 crc kubenswrapper[4932]: I1125 10:11:09.462981 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-q7tvh" podUID="d0167e71-186e-4b01-a699-65cdaab0e1de" containerName="registry-server" containerID="cri-o://7c74fd1ad169bb51832677d1ea2275f471bafb1c43bd58ebb6fa6d936f97de5e" gracePeriod=2 Nov 25 10:11:10 crc kubenswrapper[4932]: I1125 10:11:10.478161 4932 generic.go:334] "Generic (PLEG): container finished" podID="d0167e71-186e-4b01-a699-65cdaab0e1de" containerID="7c74fd1ad169bb51832677d1ea2275f471bafb1c43bd58ebb6fa6d936f97de5e" exitCode=0 Nov 25 10:11:10 crc kubenswrapper[4932]: I1125 10:11:10.478310 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7tvh" event={"ID":"d0167e71-186e-4b01-a699-65cdaab0e1de","Type":"ContainerDied","Data":"7c74fd1ad169bb51832677d1ea2275f471bafb1c43bd58ebb6fa6d936f97de5e"} Nov 25 10:11:10 crc kubenswrapper[4932]: I1125 10:11:10.933951 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:11:10 crc kubenswrapper[4932]: I1125 10:11:10.994218 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0167e71-186e-4b01-a699-65cdaab0e1de-catalog-content\") pod \"d0167e71-186e-4b01-a699-65cdaab0e1de\" (UID: \"d0167e71-186e-4b01-a699-65cdaab0e1de\") " Nov 25 10:11:10 crc kubenswrapper[4932]: I1125 10:11:10.994282 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9s7q\" (UniqueName: \"kubernetes.io/projected/d0167e71-186e-4b01-a699-65cdaab0e1de-kube-api-access-c9s7q\") pod \"d0167e71-186e-4b01-a699-65cdaab0e1de\" (UID: \"d0167e71-186e-4b01-a699-65cdaab0e1de\") " Nov 25 10:11:10 crc kubenswrapper[4932]: I1125 10:11:10.994348 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0167e71-186e-4b01-a699-65cdaab0e1de-utilities\") pod \"d0167e71-186e-4b01-a699-65cdaab0e1de\" (UID: \"d0167e71-186e-4b01-a699-65cdaab0e1de\") " Nov 25 10:11:10 crc kubenswrapper[4932]: I1125 10:11:10.996106 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0167e71-186e-4b01-a699-65cdaab0e1de-utilities" (OuterVolumeSpecName: "utilities") pod "d0167e71-186e-4b01-a699-65cdaab0e1de" (UID: "d0167e71-186e-4b01-a699-65cdaab0e1de"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:11:11 crc kubenswrapper[4932]: I1125 10:11:11.001237 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0167e71-186e-4b01-a699-65cdaab0e1de-kube-api-access-c9s7q" (OuterVolumeSpecName: "kube-api-access-c9s7q") pod "d0167e71-186e-4b01-a699-65cdaab0e1de" (UID: "d0167e71-186e-4b01-a699-65cdaab0e1de"). InnerVolumeSpecName "kube-api-access-c9s7q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:11 crc kubenswrapper[4932]: I1125 10:11:11.052063 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0167e71-186e-4b01-a699-65cdaab0e1de-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d0167e71-186e-4b01-a699-65cdaab0e1de" (UID: "d0167e71-186e-4b01-a699-65cdaab0e1de"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:11:11 crc kubenswrapper[4932]: I1125 10:11:11.096454 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0167e71-186e-4b01-a699-65cdaab0e1de-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:11 crc kubenswrapper[4932]: I1125 10:11:11.096536 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9s7q\" (UniqueName: \"kubernetes.io/projected/d0167e71-186e-4b01-a699-65cdaab0e1de-kube-api-access-c9s7q\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:11 crc kubenswrapper[4932]: I1125 10:11:11.096562 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0167e71-186e-4b01-a699-65cdaab0e1de-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:11 crc kubenswrapper[4932]: I1125 10:11:11.492931 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-q7tvh" event={"ID":"d0167e71-186e-4b01-a699-65cdaab0e1de","Type":"ContainerDied","Data":"cc605e3bc534c1253a4d5ac16f6130ce70cb471a5679d002ef20bafb807a9fe9"} Nov 25 10:11:11 crc kubenswrapper[4932]: I1125 10:11:11.493019 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-q7tvh" Nov 25 10:11:11 crc kubenswrapper[4932]: I1125 10:11:11.493392 4932 scope.go:117] "RemoveContainer" containerID="7c74fd1ad169bb51832677d1ea2275f471bafb1c43bd58ebb6fa6d936f97de5e" Nov 25 10:11:11 crc kubenswrapper[4932]: I1125 10:11:11.526666 4932 scope.go:117] "RemoveContainer" containerID="174c2a29adda64ebabd57a126bc4b03c274fc9e1687fd400d434b0006be8a102" Nov 25 10:11:11 crc kubenswrapper[4932]: I1125 10:11:11.551930 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-q7tvh"] Nov 25 10:11:11 crc kubenswrapper[4932]: I1125 10:11:11.561050 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-q7tvh"] Nov 25 10:11:11 crc kubenswrapper[4932]: I1125 10:11:11.573096 4932 scope.go:117] "RemoveContainer" containerID="37a6f58652e5fc42c1b1278f7ad7780d5aebeb9b03647f32f8679a3261276079" Nov 25 10:11:12 crc kubenswrapper[4932]: I1125 10:11:12.623590 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0167e71-186e-4b01-a699-65cdaab0e1de" path="/var/lib/kubelet/pods/d0167e71-186e-4b01-a699-65cdaab0e1de/volumes" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.064233 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tqtl5"] Nov 25 10:11:21 crc kubenswrapper[4932]: E1125 10:11:21.065176 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0167e71-186e-4b01-a699-65cdaab0e1de" containerName="extract-content" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.065225 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0167e71-186e-4b01-a699-65cdaab0e1de" containerName="extract-content" Nov 25 10:11:21 crc kubenswrapper[4932]: E1125 10:11:21.065250 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0167e71-186e-4b01-a699-65cdaab0e1de" containerName="registry-server" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.065258 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0167e71-186e-4b01-a699-65cdaab0e1de" containerName="registry-server" Nov 25 10:11:21 crc kubenswrapper[4932]: E1125 10:11:21.065275 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0167e71-186e-4b01-a699-65cdaab0e1de" containerName="extract-utilities" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.065284 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0167e71-186e-4b01-a699-65cdaab0e1de" containerName="extract-utilities" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.065487 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0167e71-186e-4b01-a699-65cdaab0e1de" containerName="registry-server" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.066799 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.087510 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tqtl5"] Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.170585 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13446be3-49b3-46e4-aff7-57e165fd0f7e-utilities\") pod \"redhat-marketplace-tqtl5\" (UID: \"13446be3-49b3-46e4-aff7-57e165fd0f7e\") " pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.170692 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13446be3-49b3-46e4-aff7-57e165fd0f7e-catalog-content\") pod \"redhat-marketplace-tqtl5\" (UID: \"13446be3-49b3-46e4-aff7-57e165fd0f7e\") " pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.170720 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p4qh\" (UniqueName: \"kubernetes.io/projected/13446be3-49b3-46e4-aff7-57e165fd0f7e-kube-api-access-5p4qh\") pod \"redhat-marketplace-tqtl5\" (UID: \"13446be3-49b3-46e4-aff7-57e165fd0f7e\") " pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.271813 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13446be3-49b3-46e4-aff7-57e165fd0f7e-catalog-content\") pod \"redhat-marketplace-tqtl5\" (UID: \"13446be3-49b3-46e4-aff7-57e165fd0f7e\") " pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.271858 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p4qh\" (UniqueName: \"kubernetes.io/projected/13446be3-49b3-46e4-aff7-57e165fd0f7e-kube-api-access-5p4qh\") pod \"redhat-marketplace-tqtl5\" (UID: \"13446be3-49b3-46e4-aff7-57e165fd0f7e\") " pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.271912 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13446be3-49b3-46e4-aff7-57e165fd0f7e-utilities\") pod \"redhat-marketplace-tqtl5\" (UID: \"13446be3-49b3-46e4-aff7-57e165fd0f7e\") " pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.272437 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13446be3-49b3-46e4-aff7-57e165fd0f7e-utilities\") pod \"redhat-marketplace-tqtl5\" (UID: \"13446be3-49b3-46e4-aff7-57e165fd0f7e\") " pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.272684 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13446be3-49b3-46e4-aff7-57e165fd0f7e-catalog-content\") pod \"redhat-marketplace-tqtl5\" (UID: \"13446be3-49b3-46e4-aff7-57e165fd0f7e\") " pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.298104 4932 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-5p4qh\" (UniqueName: \"kubernetes.io/projected/13446be3-49b3-46e4-aff7-57e165fd0f7e-kube-api-access-5p4qh\") pod \"redhat-marketplace-tqtl5\" (UID: \"13446be3-49b3-46e4-aff7-57e165fd0f7e\") " pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.406814 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:21 crc kubenswrapper[4932]: I1125 10:11:21.887450 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tqtl5"] Nov 25 10:11:22 crc kubenswrapper[4932]: I1125 10:11:22.598525 4932 generic.go:334] "Generic (PLEG): container finished" podID="13446be3-49b3-46e4-aff7-57e165fd0f7e" containerID="1851ab45816386e0d32e7e32d1608744acf80266cc0e65a0befb3e5dfb5f50c3" exitCode=0 Nov 25 10:11:22 crc kubenswrapper[4932]: I1125 10:11:22.598653 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tqtl5" event={"ID":"13446be3-49b3-46e4-aff7-57e165fd0f7e","Type":"ContainerDied","Data":"1851ab45816386e0d32e7e32d1608744acf80266cc0e65a0befb3e5dfb5f50c3"} Nov 25 10:11:22 crc kubenswrapper[4932]: I1125 10:11:22.598863 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tqtl5" event={"ID":"13446be3-49b3-46e4-aff7-57e165fd0f7e","Type":"ContainerStarted","Data":"e10d192dacbb5697fd6af6c909061f5a6e3d5933d12507f71cf96b7d31dd708c"} Nov 25 10:11:23 crc kubenswrapper[4932]: I1125 10:11:23.609177 4932 generic.go:334] "Generic (PLEG): container finished" podID="13446be3-49b3-46e4-aff7-57e165fd0f7e" containerID="d1632c6ef0c3a2af040f4da08c2ee3aed4d95ba3e2c77d8abe72413aca430da2" exitCode=0 Nov 25 10:11:23 crc kubenswrapper[4932]: I1125 10:11:23.609251 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tqtl5" event={"ID":"13446be3-49b3-46e4-aff7-57e165fd0f7e","Type":"ContainerDied","Data":"d1632c6ef0c3a2af040f4da08c2ee3aed4d95ba3e2c77d8abe72413aca430da2"} Nov 25 10:11:24 crc kubenswrapper[4932]: I1125 10:11:24.626403 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tqtl5" event={"ID":"13446be3-49b3-46e4-aff7-57e165fd0f7e","Type":"ContainerStarted","Data":"e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de"} Nov 25 10:11:24 crc kubenswrapper[4932]: I1125 10:11:24.652886 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tqtl5" podStartSLOduration=2.252085894 podStartE2EDuration="3.652869767s" podCreationTimestamp="2025-11-25 10:11:21 +0000 UTC" firstStartedPulling="2025-11-25 10:11:22.601232242 +0000 UTC m=+4942.727261805" lastFinishedPulling="2025-11-25 10:11:24.002016115 +0000 UTC m=+4944.128045678" observedRunningTime="2025-11-25 10:11:24.648536283 +0000 UTC m=+4944.774565846" watchObservedRunningTime="2025-11-25 10:11:24.652869767 +0000 UTC m=+4944.778899330" Nov 25 10:11:31 crc kubenswrapper[4932]: I1125 10:11:31.407910 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:31 crc kubenswrapper[4932]: I1125 10:11:31.408705 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:31 crc kubenswrapper[4932]: I1125 10:11:31.458889 4932 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:31 crc kubenswrapper[4932]: I1125 10:11:31.764554 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:31 crc kubenswrapper[4932]: I1125 10:11:31.827548 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tqtl5"] Nov 25 10:11:33 crc kubenswrapper[4932]: I1125 10:11:33.707807 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tqtl5" podUID="13446be3-49b3-46e4-aff7-57e165fd0f7e" containerName="registry-server" containerID="cri-o://e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de" gracePeriod=2 Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.210823 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.369883 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13446be3-49b3-46e4-aff7-57e165fd0f7e-catalog-content\") pod \"13446be3-49b3-46e4-aff7-57e165fd0f7e\" (UID: \"13446be3-49b3-46e4-aff7-57e165fd0f7e\") " Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.370102 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13446be3-49b3-46e4-aff7-57e165fd0f7e-utilities\") pod \"13446be3-49b3-46e4-aff7-57e165fd0f7e\" (UID: \"13446be3-49b3-46e4-aff7-57e165fd0f7e\") " Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.370153 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5p4qh\" (UniqueName: \"kubernetes.io/projected/13446be3-49b3-46e4-aff7-57e165fd0f7e-kube-api-access-5p4qh\") pod \"13446be3-49b3-46e4-aff7-57e165fd0f7e\" (UID: \"13446be3-49b3-46e4-aff7-57e165fd0f7e\") " Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.371443 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13446be3-49b3-46e4-aff7-57e165fd0f7e-utilities" (OuterVolumeSpecName: "utilities") pod "13446be3-49b3-46e4-aff7-57e165fd0f7e" (UID: "13446be3-49b3-46e4-aff7-57e165fd0f7e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.379002 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13446be3-49b3-46e4-aff7-57e165fd0f7e-kube-api-access-5p4qh" (OuterVolumeSpecName: "kube-api-access-5p4qh") pod "13446be3-49b3-46e4-aff7-57e165fd0f7e" (UID: "13446be3-49b3-46e4-aff7-57e165fd0f7e"). InnerVolumeSpecName "kube-api-access-5p4qh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.397301 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13446be3-49b3-46e4-aff7-57e165fd0f7e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "13446be3-49b3-46e4-aff7-57e165fd0f7e" (UID: "13446be3-49b3-46e4-aff7-57e165fd0f7e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.472350 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13446be3-49b3-46e4-aff7-57e165fd0f7e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.472402 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5p4qh\" (UniqueName: \"kubernetes.io/projected/13446be3-49b3-46e4-aff7-57e165fd0f7e-kube-api-access-5p4qh\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.472418 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13446be3-49b3-46e4-aff7-57e165fd0f7e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.716778 4932 generic.go:334] "Generic (PLEG): container finished" podID="13446be3-49b3-46e4-aff7-57e165fd0f7e" containerID="e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de" exitCode=0 Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.717181 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tqtl5" event={"ID":"13446be3-49b3-46e4-aff7-57e165fd0f7e","Type":"ContainerDied","Data":"e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de"} Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.717230 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tqtl5" event={"ID":"13446be3-49b3-46e4-aff7-57e165fd0f7e","Type":"ContainerDied","Data":"e10d192dacbb5697fd6af6c909061f5a6e3d5933d12507f71cf96b7d31dd708c"} Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.717248 4932 scope.go:117] "RemoveContainer" containerID="e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.717398 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tqtl5" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.742792 4932 scope.go:117] "RemoveContainer" containerID="d1632c6ef0c3a2af040f4da08c2ee3aed4d95ba3e2c77d8abe72413aca430da2" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.748136 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tqtl5"] Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.756968 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tqtl5"] Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.772512 4932 scope.go:117] "RemoveContainer" containerID="1851ab45816386e0d32e7e32d1608744acf80266cc0e65a0befb3e5dfb5f50c3" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.794903 4932 scope.go:117] "RemoveContainer" containerID="e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de" Nov 25 10:11:34 crc kubenswrapper[4932]: E1125 10:11:34.795419 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de\": container with ID starting with e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de not found: ID does not exist" containerID="e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.795495 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de"} err="failed to get container status \"e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de\": rpc error: code = NotFound desc = could not find container \"e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de\": container with ID starting with e665f5542f748b7c76bf6ea33f9f5ed46ee1f5419c82a3f7996636f5681893de not found: ID does not exist" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.795538 4932 scope.go:117] "RemoveContainer" containerID="d1632c6ef0c3a2af040f4da08c2ee3aed4d95ba3e2c77d8abe72413aca430da2" Nov 25 10:11:34 crc kubenswrapper[4932]: E1125 10:11:34.795904 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1632c6ef0c3a2af040f4da08c2ee3aed4d95ba3e2c77d8abe72413aca430da2\": container with ID starting with d1632c6ef0c3a2af040f4da08c2ee3aed4d95ba3e2c77d8abe72413aca430da2 not found: ID does not exist" containerID="d1632c6ef0c3a2af040f4da08c2ee3aed4d95ba3e2c77d8abe72413aca430da2" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.795940 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1632c6ef0c3a2af040f4da08c2ee3aed4d95ba3e2c77d8abe72413aca430da2"} err="failed to get container status \"d1632c6ef0c3a2af040f4da08c2ee3aed4d95ba3e2c77d8abe72413aca430da2\": rpc error: code = NotFound desc = could not find container \"d1632c6ef0c3a2af040f4da08c2ee3aed4d95ba3e2c77d8abe72413aca430da2\": container with ID starting with d1632c6ef0c3a2af040f4da08c2ee3aed4d95ba3e2c77d8abe72413aca430da2 not found: ID does not exist" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.795968 4932 scope.go:117] "RemoveContainer" containerID="1851ab45816386e0d32e7e32d1608744acf80266cc0e65a0befb3e5dfb5f50c3" Nov 25 10:11:34 crc kubenswrapper[4932]: E1125 10:11:34.796410 4932 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"1851ab45816386e0d32e7e32d1608744acf80266cc0e65a0befb3e5dfb5f50c3\": container with ID starting with 1851ab45816386e0d32e7e32d1608744acf80266cc0e65a0befb3e5dfb5f50c3 not found: ID does not exist" containerID="1851ab45816386e0d32e7e32d1608744acf80266cc0e65a0befb3e5dfb5f50c3" Nov 25 10:11:34 crc kubenswrapper[4932]: I1125 10:11:34.796435 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1851ab45816386e0d32e7e32d1608744acf80266cc0e65a0befb3e5dfb5f50c3"} err="failed to get container status \"1851ab45816386e0d32e7e32d1608744acf80266cc0e65a0befb3e5dfb5f50c3\": rpc error: code = NotFound desc = could not find container \"1851ab45816386e0d32e7e32d1608744acf80266cc0e65a0befb3e5dfb5f50c3\": container with ID starting with 1851ab45816386e0d32e7e32d1608744acf80266cc0e65a0befb3e5dfb5f50c3 not found: ID does not exist" Nov 25 10:11:36 crc kubenswrapper[4932]: I1125 10:11:36.616118 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13446be3-49b3-46e4-aff7-57e165fd0f7e" path="/var/lib/kubelet/pods/13446be3-49b3-46e4-aff7-57e165fd0f7e/volumes" Nov 25 10:11:37 crc kubenswrapper[4932]: I1125 10:11:37.180777 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:11:37 crc kubenswrapper[4932]: I1125 10:11:37.180842 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:12:07 crc kubenswrapper[4932]: I1125 10:12:07.181609 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:12:07 crc kubenswrapper[4932]: I1125 10:12:07.182346 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.013829 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-866449bdb9-c87z9"] Nov 25 10:12:12 crc kubenswrapper[4932]: E1125 10:12:12.015339 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13446be3-49b3-46e4-aff7-57e165fd0f7e" containerName="extract-content" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.015357 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="13446be3-49b3-46e4-aff7-57e165fd0f7e" containerName="extract-content" Nov 25 10:12:12 crc kubenswrapper[4932]: E1125 10:12:12.015394 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13446be3-49b3-46e4-aff7-57e165fd0f7e" containerName="extract-utilities" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.015401 4932 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="13446be3-49b3-46e4-aff7-57e165fd0f7e" containerName="extract-utilities" Nov 25 10:12:12 crc kubenswrapper[4932]: E1125 10:12:12.015418 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13446be3-49b3-46e4-aff7-57e165fd0f7e" containerName="registry-server" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.015428 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="13446be3-49b3-46e4-aff7-57e165fd0f7e" containerName="registry-server" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.015606 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="13446be3-49b3-46e4-aff7-57e165fd0f7e" containerName="registry-server" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.016766 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-866449bdb9-c87z9" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.020941 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.021031 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.021522 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.021753 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-9q5b9" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.029310 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-866449bdb9-c87z9"] Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.045581 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55c86457d7-49fs6"] Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.047307 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55c86457d7-49fs6" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.050786 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.082890 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55c86457d7-49fs6"] Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.090713 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9v87\" (UniqueName: \"kubernetes.io/projected/60783181-9776-4dc3-a332-d558513d2c4d-kube-api-access-p9v87\") pod \"dnsmasq-dns-55c86457d7-49fs6\" (UID: \"60783181-9776-4dc3-a332-d558513d2c4d\") " pod="openstack/dnsmasq-dns-55c86457d7-49fs6" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.090769 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60783181-9776-4dc3-a332-d558513d2c4d-dns-svc\") pod \"dnsmasq-dns-55c86457d7-49fs6\" (UID: \"60783181-9776-4dc3-a332-d558513d2c4d\") " pod="openstack/dnsmasq-dns-55c86457d7-49fs6" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.090793 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60783181-9776-4dc3-a332-d558513d2c4d-config\") pod \"dnsmasq-dns-55c86457d7-49fs6\" (UID: \"60783181-9776-4dc3-a332-d558513d2c4d\") " pod="openstack/dnsmasq-dns-55c86457d7-49fs6" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.090815 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a97fa80e-68b6-4bce-8c1e-b07421735607-config\") pod \"dnsmasq-dns-866449bdb9-c87z9\" (UID: \"a97fa80e-68b6-4bce-8c1e-b07421735607\") " pod="openstack/dnsmasq-dns-866449bdb9-c87z9" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.090845 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nxz6\" (UniqueName: \"kubernetes.io/projected/a97fa80e-68b6-4bce-8c1e-b07421735607-kube-api-access-7nxz6\") pod \"dnsmasq-dns-866449bdb9-c87z9\" (UID: \"a97fa80e-68b6-4bce-8c1e-b07421735607\") " pod="openstack/dnsmasq-dns-866449bdb9-c87z9" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.192535 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60783181-9776-4dc3-a332-d558513d2c4d-dns-svc\") pod \"dnsmasq-dns-55c86457d7-49fs6\" (UID: \"60783181-9776-4dc3-a332-d558513d2c4d\") " pod="openstack/dnsmasq-dns-55c86457d7-49fs6" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.192585 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60783181-9776-4dc3-a332-d558513d2c4d-config\") pod \"dnsmasq-dns-55c86457d7-49fs6\" (UID: \"60783181-9776-4dc3-a332-d558513d2c4d\") " pod="openstack/dnsmasq-dns-55c86457d7-49fs6" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.192619 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a97fa80e-68b6-4bce-8c1e-b07421735607-config\") pod \"dnsmasq-dns-866449bdb9-c87z9\" (UID: \"a97fa80e-68b6-4bce-8c1e-b07421735607\") " pod="openstack/dnsmasq-dns-866449bdb9-c87z9" Nov 25 10:12:12 crc 
kubenswrapper[4932]: I1125 10:12:12.192657 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nxz6\" (UniqueName: \"kubernetes.io/projected/a97fa80e-68b6-4bce-8c1e-b07421735607-kube-api-access-7nxz6\") pod \"dnsmasq-dns-866449bdb9-c87z9\" (UID: \"a97fa80e-68b6-4bce-8c1e-b07421735607\") " pod="openstack/dnsmasq-dns-866449bdb9-c87z9" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.192727 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9v87\" (UniqueName: \"kubernetes.io/projected/60783181-9776-4dc3-a332-d558513d2c4d-kube-api-access-p9v87\") pod \"dnsmasq-dns-55c86457d7-49fs6\" (UID: \"60783181-9776-4dc3-a332-d558513d2c4d\") " pod="openstack/dnsmasq-dns-55c86457d7-49fs6" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.193733 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60783181-9776-4dc3-a332-d558513d2c4d-dns-svc\") pod \"dnsmasq-dns-55c86457d7-49fs6\" (UID: \"60783181-9776-4dc3-a332-d558513d2c4d\") " pod="openstack/dnsmasq-dns-55c86457d7-49fs6" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.194035 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60783181-9776-4dc3-a332-d558513d2c4d-config\") pod \"dnsmasq-dns-55c86457d7-49fs6\" (UID: \"60783181-9776-4dc3-a332-d558513d2c4d\") " pod="openstack/dnsmasq-dns-55c86457d7-49fs6" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.194459 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a97fa80e-68b6-4bce-8c1e-b07421735607-config\") pod \"dnsmasq-dns-866449bdb9-c87z9\" (UID: \"a97fa80e-68b6-4bce-8c1e-b07421735607\") " pod="openstack/dnsmasq-dns-866449bdb9-c87z9" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.215339 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nxz6\" (UniqueName: \"kubernetes.io/projected/a97fa80e-68b6-4bce-8c1e-b07421735607-kube-api-access-7nxz6\") pod \"dnsmasq-dns-866449bdb9-c87z9\" (UID: \"a97fa80e-68b6-4bce-8c1e-b07421735607\") " pod="openstack/dnsmasq-dns-866449bdb9-c87z9" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.226567 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9v87\" (UniqueName: \"kubernetes.io/projected/60783181-9776-4dc3-a332-d558513d2c4d-kube-api-access-p9v87\") pod \"dnsmasq-dns-55c86457d7-49fs6\" (UID: \"60783181-9776-4dc3-a332-d558513d2c4d\") " pod="openstack/dnsmasq-dns-55c86457d7-49fs6" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.336315 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-866449bdb9-c87z9" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.418523 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55c86457d7-49fs6" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.441514 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-866449bdb9-c87z9"] Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.443259 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f4c6c447c-wzsh5"] Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.450675 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.458611 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f4c6c447c-wzsh5"] Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.498899 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09f050dc-7e90-4377-a507-249f9b184e11-dns-svc\") pod \"dnsmasq-dns-f4c6c447c-wzsh5\" (UID: \"09f050dc-7e90-4377-a507-249f9b184e11\") " pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.499218 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t84m4\" (UniqueName: \"kubernetes.io/projected/09f050dc-7e90-4377-a507-249f9b184e11-kube-api-access-t84m4\") pod \"dnsmasq-dns-f4c6c447c-wzsh5\" (UID: \"09f050dc-7e90-4377-a507-249f9b184e11\") " pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.499373 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09f050dc-7e90-4377-a507-249f9b184e11-config\") pod \"dnsmasq-dns-f4c6c447c-wzsh5\" (UID: \"09f050dc-7e90-4377-a507-249f9b184e11\") " pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.601901 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09f050dc-7e90-4377-a507-249f9b184e11-dns-svc\") pod \"dnsmasq-dns-f4c6c447c-wzsh5\" (UID: \"09f050dc-7e90-4377-a507-249f9b184e11\") " pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.602020 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t84m4\" (UniqueName: \"kubernetes.io/projected/09f050dc-7e90-4377-a507-249f9b184e11-kube-api-access-t84m4\") pod \"dnsmasq-dns-f4c6c447c-wzsh5\" (UID: \"09f050dc-7e90-4377-a507-249f9b184e11\") " pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.602097 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09f050dc-7e90-4377-a507-249f9b184e11-config\") pod \"dnsmasq-dns-f4c6c447c-wzsh5\" (UID: \"09f050dc-7e90-4377-a507-249f9b184e11\") " pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.603558 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09f050dc-7e90-4377-a507-249f9b184e11-config\") pod \"dnsmasq-dns-f4c6c447c-wzsh5\" (UID: \"09f050dc-7e90-4377-a507-249f9b184e11\") " pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.604155 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09f050dc-7e90-4377-a507-249f9b184e11-dns-svc\") pod \"dnsmasq-dns-f4c6c447c-wzsh5\" (UID: \"09f050dc-7e90-4377-a507-249f9b184e11\") " pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.648388 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t84m4\" (UniqueName: 
\"kubernetes.io/projected/09f050dc-7e90-4377-a507-249f9b184e11-kube-api-access-t84m4\") pod \"dnsmasq-dns-f4c6c447c-wzsh5\" (UID: \"09f050dc-7e90-4377-a507-249f9b184e11\") " pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.758872 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55c86457d7-49fs6"] Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.822821 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.837145 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59c6c64b5c-7l4p8"] Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.838385 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.852285 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59c6c64b5c-7l4p8"] Nov 25 10:12:12 crc kubenswrapper[4932]: I1125 10:12:12.999478 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-866449bdb9-c87z9"] Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.025952 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8ll8\" (UniqueName: \"kubernetes.io/projected/35672445-3ce5-46a4-ab4c-d374506e1c6a-kube-api-access-z8ll8\") pod \"dnsmasq-dns-59c6c64b5c-7l4p8\" (UID: \"35672445-3ce5-46a4-ab4c-d374506e1c6a\") " pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.026024 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35672445-3ce5-46a4-ab4c-d374506e1c6a-dns-svc\") pod \"dnsmasq-dns-59c6c64b5c-7l4p8\" (UID: \"35672445-3ce5-46a4-ab4c-d374506e1c6a\") " pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.026121 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35672445-3ce5-46a4-ab4c-d374506e1c6a-config\") pod \"dnsmasq-dns-59c6c64b5c-7l4p8\" (UID: \"35672445-3ce5-46a4-ab4c-d374506e1c6a\") " pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.125011 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866449bdb9-c87z9" event={"ID":"a97fa80e-68b6-4bce-8c1e-b07421735607","Type":"ContainerStarted","Data":"9741ee4ec459e7e2186bdfc5ce33b31d35426863d4eb2b0b33dae577e26e0a5f"} Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.129051 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8ll8\" (UniqueName: \"kubernetes.io/projected/35672445-3ce5-46a4-ab4c-d374506e1c6a-kube-api-access-z8ll8\") pod \"dnsmasq-dns-59c6c64b5c-7l4p8\" (UID: \"35672445-3ce5-46a4-ab4c-d374506e1c6a\") " pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.129099 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35672445-3ce5-46a4-ab4c-d374506e1c6a-dns-svc\") pod \"dnsmasq-dns-59c6c64b5c-7l4p8\" (UID: \"35672445-3ce5-46a4-ab4c-d374506e1c6a\") " pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" Nov 25 10:12:13 
crc kubenswrapper[4932]: I1125 10:12:13.129234 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35672445-3ce5-46a4-ab4c-d374506e1c6a-config\") pod \"dnsmasq-dns-59c6c64b5c-7l4p8\" (UID: \"35672445-3ce5-46a4-ab4c-d374506e1c6a\") " pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.130393 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35672445-3ce5-46a4-ab4c-d374506e1c6a-config\") pod \"dnsmasq-dns-59c6c64b5c-7l4p8\" (UID: \"35672445-3ce5-46a4-ab4c-d374506e1c6a\") " pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.130524 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35672445-3ce5-46a4-ab4c-d374506e1c6a-dns-svc\") pod \"dnsmasq-dns-59c6c64b5c-7l4p8\" (UID: \"35672445-3ce5-46a4-ab4c-d374506e1c6a\") " pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.148846 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55c86457d7-49fs6"] Nov 25 10:12:13 crc kubenswrapper[4932]: W1125 10:12:13.149376 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod60783181_9776_4dc3_a332_d558513d2c4d.slice/crio-ec4d4dcfd1ea901198c0421d51b3933b05131259277178d1850648757357e8d8 WatchSource:0}: Error finding container ec4d4dcfd1ea901198c0421d51b3933b05131259277178d1850648757357e8d8: Status 404 returned error can't find the container with id ec4d4dcfd1ea901198c0421d51b3933b05131259277178d1850648757357e8d8 Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.161452 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8ll8\" (UniqueName: \"kubernetes.io/projected/35672445-3ce5-46a4-ab4c-d374506e1c6a-kube-api-access-z8ll8\") pod \"dnsmasq-dns-59c6c64b5c-7l4p8\" (UID: \"35672445-3ce5-46a4-ab4c-d374506e1c6a\") " pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.181738 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.379050 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f4c6c447c-wzsh5"] Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.605261 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.607499 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.616452 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.616689 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.616737 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.616800 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-rm8hg" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.616959 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.616991 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.617021 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.617311 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.703482 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59c6c64b5c-7l4p8"] Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.746335 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6clv\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-kube-api-access-b6clv\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.746433 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.746473 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.746545 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1225a271-403f-4e91-b5b2-7b8a4aaae855-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.746747 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.746819 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.747025 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.747091 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.747363 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1225a271-403f-4e91-b5b2-7b8a4aaae855-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.747421 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.747507 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.849461 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.849579 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.850226 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.850307 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1225a271-403f-4e91-b5b2-7b8a4aaae855-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.850368 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.850404 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.850424 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6clv\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-kube-api-access-b6clv\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.850449 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.850489 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.850515 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1225a271-403f-4e91-b5b2-7b8a4aaae855-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.850554 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.850575 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 
25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.850977 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.851724 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.852396 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.852544 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.855098 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.856082 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.857147 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1225a271-403f-4e91-b5b2-7b8a4aaae855-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.859538 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.859578 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6ba10319f446a003ed4fcd0002ff9f0efb18640d00179771f05b30d41653cbb4/globalmount\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.867005 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1225a271-403f-4e91-b5b2-7b8a4aaae855-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.872167 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6clv\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-kube-api-access-b6clv\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.895735 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\") pod \"rabbitmq-cell1-server-0\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.936407 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.941264 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.943267 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.946363 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.947783 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.948424 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.949170 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-wftr4"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.949462 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.949619 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.952399 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 25 10:12:13 crc kubenswrapper[4932]: I1125 10:12:13.956082 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.053621 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/798d2263-61ba-4aa7-ba96-9971ee1080a8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.053710 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.053757 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.053797 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/798d2263-61ba-4aa7-ba96-9971ee1080a8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.053849 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.053875 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.053912 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.053944 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.053993 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lslnn\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-kube-api-access-lslnn\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.054008 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.054027 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-config-data\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.138571 4932 generic.go:334] "Generic (PLEG): container finished" podID="35672445-3ce5-46a4-ab4c-d374506e1c6a" containerID="687a3f755d8b79f5a12bdf00666684261ac455d4122e2ecd18d477f4d4e8996e" exitCode=0
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.139265 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" event={"ID":"35672445-3ce5-46a4-ab4c-d374506e1c6a","Type":"ContainerDied","Data":"687a3f755d8b79f5a12bdf00666684261ac455d4122e2ecd18d477f4d4e8996e"}
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.140851 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" event={"ID":"35672445-3ce5-46a4-ab4c-d374506e1c6a","Type":"ContainerStarted","Data":"20651ac3531b53c9aa43b667e86caa27826e7ad41f91392fc372863ee55bea72"}
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.152179 4932 generic.go:334] "Generic (PLEG): container finished" podID="60783181-9776-4dc3-a332-d558513d2c4d" containerID="ce1e1712048923b27b433d1e803db7a846f538b9bb81c64448524d94cc205cad" exitCode=0
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.152275 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55c86457d7-49fs6" event={"ID":"60783181-9776-4dc3-a332-d558513d2c4d","Type":"ContainerDied","Data":"ce1e1712048923b27b433d1e803db7a846f538b9bb81c64448524d94cc205cad"}
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.152312 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55c86457d7-49fs6" event={"ID":"60783181-9776-4dc3-a332-d558513d2c4d","Type":"ContainerStarted","Data":"ec4d4dcfd1ea901198c0421d51b3933b05131259277178d1850648757357e8d8"}
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.155185 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/798d2263-61ba-4aa7-ba96-9971ee1080a8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.155870 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.155896 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.155935 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.155968 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.156020 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lslnn\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-kube-api-access-lslnn\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.156042 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.156063 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-config-data\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.156085 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/798d2263-61ba-4aa7-ba96-9971ee1080a8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.156114 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.156150 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.157211 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.158538 4932 generic.go:334] "Generic (PLEG): container finished" podID="09f050dc-7e90-4377-a507-249f9b184e11" containerID="34c68bec8e239a5264783efb9e50b7fc04588788915374afcee04c93bf0a349b" exitCode=0
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.159385 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.159705 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.159715 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" event={"ID":"09f050dc-7e90-4377-a507-249f9b184e11","Type":"ContainerDied","Data":"34c68bec8e239a5264783efb9e50b7fc04588788915374afcee04c93bf0a349b"}
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.159751 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" event={"ID":"09f050dc-7e90-4377-a507-249f9b184e11","Type":"ContainerStarted","Data":"d8b7f4c20efb24adbc66930dd026503b4fd7e6eae94d3385dd2c30317dbdfe7a"}
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.160675 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-config-data\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.167278 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-server-conf\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.172262 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/798d2263-61ba-4aa7-ba96-9971ee1080a8-pod-info\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.175753 4932 generic.go:334] "Generic (PLEG): container finished" podID="a97fa80e-68b6-4bce-8c1e-b07421735607" containerID="926f94bf08cbb090430fbca37c02d8ab7b8b6bed5565bb69193d2213996c2609" exitCode=0
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.175806 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866449bdb9-c87z9" event={"ID":"a97fa80e-68b6-4bce-8c1e-b07421735607","Type":"ContainerDied","Data":"926f94bf08cbb090430fbca37c02d8ab7b8b6bed5565bb69193d2213996c2609"}
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.176676 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.176754 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7700a96a3d5894472790ba97f33bb0f7ed26f80d7ce59f029dac81014fa38d08/globalmount\"" pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.179317 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/798d2263-61ba-4aa7-ba96-9971ee1080a8-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.181505 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lslnn\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-kube-api-access-lslnn\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.183667 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.185996 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.224103 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\") pod \"rabbitmq-server-0\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.378088 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.489780 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 25 10:12:14 crc kubenswrapper[4932]: E1125 10:12:14.546948 4932 log.go:32] "CreateContainer in sandbox from runtime service failed" err=<
Nov 25 10:12:14 crc kubenswrapper[4932]: 	rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/09f050dc-7e90-4377-a507-249f9b184e11/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory
Nov 25 10:12:14 crc kubenswrapper[4932]: > podSandboxID="d8b7f4c20efb24adbc66930dd026503b4fd7e6eae94d3385dd2c30317dbdfe7a"
Nov 25 10:12:14 crc kubenswrapper[4932]: E1125 10:12:14.547116 4932 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Nov 25 10:12:14 crc kubenswrapper[4932]: 	container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nb6hc5h68h68h594h659hdbh679h65ch5f6hdch6h5b9h8fh55hfhf8h57fhc7h56ch687h669h559h678h5dhc7hf7h697h5d6h9ch669h54fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t84m4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-f4c6c447c-wzsh5_openstack(09f050dc-7e90-4377-a507-249f9b184e11): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/09f050dc-7e90-4377-a507-249f9b184e11/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory
Nov 25 10:12:14 crc kubenswrapper[4932]: > logger="UnhandledError"
Nov 25 10:12:14 crc kubenswrapper[4932]: E1125 10:12:14.548287 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/09f050dc-7e90-4377-a507-249f9b184e11/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" podUID="09f050dc-7e90-4377-a507-249f9b184e11"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.618295 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55c86457d7-49fs6"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.661182 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-866449bdb9-c87z9"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.667768 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60783181-9776-4dc3-a332-d558513d2c4d-dns-svc\") pod \"60783181-9776-4dc3-a332-d558513d2c4d\" (UID: \"60783181-9776-4dc3-a332-d558513d2c4d\") "
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.667942 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60783181-9776-4dc3-a332-d558513d2c4d-config\") pod \"60783181-9776-4dc3-a332-d558513d2c4d\" (UID: \"60783181-9776-4dc3-a332-d558513d2c4d\") "
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.668091 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9v87\" (UniqueName: \"kubernetes.io/projected/60783181-9776-4dc3-a332-d558513d2c4d-kube-api-access-p9v87\") pod \"60783181-9776-4dc3-a332-d558513d2c4d\" (UID: \"60783181-9776-4dc3-a332-d558513d2c4d\") "
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.681961 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60783181-9776-4dc3-a332-d558513d2c4d-kube-api-access-p9v87" (OuterVolumeSpecName: "kube-api-access-p9v87") pod "60783181-9776-4dc3-a332-d558513d2c4d" (UID: "60783181-9776-4dc3-a332-d558513d2c4d"). InnerVolumeSpecName "kube-api-access-p9v87". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.685935 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60783181-9776-4dc3-a332-d558513d2c4d-config" (OuterVolumeSpecName: "config") pod "60783181-9776-4dc3-a332-d558513d2c4d" (UID: "60783181-9776-4dc3-a332-d558513d2c4d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.686566 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Nov 25 10:12:14 crc kubenswrapper[4932]: E1125 10:12:14.706485 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a97fa80e-68b6-4bce-8c1e-b07421735607" containerName="init"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.706519 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a97fa80e-68b6-4bce-8c1e-b07421735607" containerName="init"
Nov 25 10:12:14 crc kubenswrapper[4932]: E1125 10:12:14.706539 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60783181-9776-4dc3-a332-d558513d2c4d" containerName="init"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.706545 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="60783181-9776-4dc3-a332-d558513d2c4d" containerName="init"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.706747 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="60783181-9776-4dc3-a332-d558513d2c4d" containerName="init"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.706783 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a97fa80e-68b6-4bce-8c1e-b07421735607" containerName="init"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.707563 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.712898 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60783181-9776-4dc3-a332-d558513d2c4d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "60783181-9776-4dc3-a332-d558513d2c4d" (UID: "60783181-9776-4dc3-a332-d558513d2c4d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.714414 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-98ts2"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.714623 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.715663 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.715940 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.722339 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.735866 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.770535 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60783181-9776-4dc3-a332-d558513d2c4d-config\") on node \"crc\" DevicePath \"\""
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.770575 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9v87\" (UniqueName: \"kubernetes.io/projected/60783181-9776-4dc3-a332-d558513d2c4d-kube-api-access-p9v87\") on node \"crc\" DevicePath \"\""
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.770589 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60783181-9776-4dc3-a332-d558513d2c4d-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.871983 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nxz6\" (UniqueName: \"kubernetes.io/projected/a97fa80e-68b6-4bce-8c1e-b07421735607-kube-api-access-7nxz6\") pod \"a97fa80e-68b6-4bce-8c1e-b07421735607\" (UID: \"a97fa80e-68b6-4bce-8c1e-b07421735607\") "
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.872209 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a97fa80e-68b6-4bce-8c1e-b07421735607-config\") pod \"a97fa80e-68b6-4bce-8c1e-b07421735607\" (UID: \"a97fa80e-68b6-4bce-8c1e-b07421735607\") "
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.872609 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2c7a043a-8744-4406-9453-52aa0742cac0-kolla-config\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.872652 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c7a043a-8744-4406-9453-52aa0742cac0-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.872706 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2c7a043a-8744-4406-9453-52aa0742cac0-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.872784 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c7a043a-8744-4406-9453-52aa0742cac0-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.872872 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wb8hz\" (UniqueName: \"kubernetes.io/projected/2c7a043a-8744-4406-9453-52aa0742cac0-kube-api-access-wb8hz\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.873070 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2c7a043a-8744-4406-9453-52aa0742cac0-config-data-default\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.873139 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7a043a-8744-4406-9453-52aa0742cac0-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.873165 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-664abef8-1254-424e-92ed-fd305ba46e03\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-664abef8-1254-424e-92ed-fd305ba46e03\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.878393 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a97fa80e-68b6-4bce-8c1e-b07421735607-kube-api-access-7nxz6" (OuterVolumeSpecName: "kube-api-access-7nxz6") pod "a97fa80e-68b6-4bce-8c1e-b07421735607" (UID: "a97fa80e-68b6-4bce-8c1e-b07421735607"). InnerVolumeSpecName "kube-api-access-7nxz6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.896796 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a97fa80e-68b6-4bce-8c1e-b07421735607-config" (OuterVolumeSpecName: "config") pod "a97fa80e-68b6-4bce-8c1e-b07421735607" (UID: "a97fa80e-68b6-4bce-8c1e-b07421735607"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.928993 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.978407 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2c7a043a-8744-4406-9453-52aa0742cac0-kolla-config\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.978486 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c7a043a-8744-4406-9453-52aa0742cac0-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.978553 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2c7a043a-8744-4406-9453-52aa0742cac0-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.978581 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c7a043a-8744-4406-9453-52aa0742cac0-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.978606 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wb8hz\" (UniqueName: \"kubernetes.io/projected/2c7a043a-8744-4406-9453-52aa0742cac0-kube-api-access-wb8hz\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.978682 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2c7a043a-8744-4406-9453-52aa0742cac0-config-data-default\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.978715 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7a043a-8744-4406-9453-52aa0742cac0-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.978739 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-664abef8-1254-424e-92ed-fd305ba46e03\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-664abef8-1254-424e-92ed-fd305ba46e03\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.978824 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nxz6\" (UniqueName: \"kubernetes.io/projected/a97fa80e-68b6-4bce-8c1e-b07421735607-kube-api-access-7nxz6\") on node \"crc\" DevicePath \"\""
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.978837 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a97fa80e-68b6-4bce-8c1e-b07421735607-config\") on node \"crc\" DevicePath \"\""
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.979641 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/2c7a043a-8744-4406-9453-52aa0742cac0-kolla-config\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.980642 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/2c7a043a-8744-4406-9453-52aa0742cac0-config-data-generated\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.981277 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2c7a043a-8744-4406-9453-52aa0742cac0-operator-scripts\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.985025 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/2c7a043a-8744-4406-9453-52aa0742cac0-config-data-default\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.986017 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c7a043a-8744-4406-9453-52aa0742cac0-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.992838 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c7a043a-8744-4406-9453-52aa0742cac0-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.993496 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 25 10:12:14 crc kubenswrapper[4932]: I1125 10:12:14.993525 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-664abef8-1254-424e-92ed-fd305ba46e03\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-664abef8-1254-424e-92ed-fd305ba46e03\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/a4f7f7f8e626d6887bcb4c62c650836d6fa71a1b5c5aff3b53cdf62ac436a273/globalmount\"" pod="openstack/openstack-galera-0"
Nov 25 10:12:15 crc kubenswrapper[4932]: I1125 10:12:15.000831 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wb8hz\" (UniqueName: \"kubernetes.io/projected/2c7a043a-8744-4406-9453-52aa0742cac0-kube-api-access-wb8hz\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:15 crc kubenswrapper[4932]: I1125 10:12:15.027042 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-664abef8-1254-424e-92ed-fd305ba46e03\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-664abef8-1254-424e-92ed-fd305ba46e03\") pod \"openstack-galera-0\" (UID: \"2c7a043a-8744-4406-9453-52aa0742cac0\") " pod="openstack/openstack-galera-0"
Nov 25 10:12:15 crc kubenswrapper[4932]: I1125 10:12:15.077053 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.193844 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1225a271-403f-4e91-b5b2-7b8a4aaae855","Type":"ContainerStarted","Data":"30005499c83c0b304bd91494eef323d18d61cbc0bdce0afdeecec7e7a2c5edf0"}
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.194838 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"798d2263-61ba-4aa7-ba96-9971ee1080a8","Type":"ContainerStarted","Data":"0cfcbd72f74d41ed5f37ba6fd4513e685e87099bc92fbd0339715f9dddf4e44b"}
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.198120 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866449bdb9-c87z9" event={"ID":"a97fa80e-68b6-4bce-8c1e-b07421735607","Type":"ContainerDied","Data":"9741ee4ec459e7e2186bdfc5ce33b31d35426863d4eb2b0b33dae577e26e0a5f"}
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.198158 4932 scope.go:117] "RemoveContainer" containerID="926f94bf08cbb090430fbca37c02d8ab7b8b6bed5565bb69193d2213996c2609"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.198315 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-866449bdb9-c87z9"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.222017 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" event={"ID":"35672445-3ce5-46a4-ab4c-d374506e1c6a","Type":"ContainerStarted","Data":"99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf"}
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.222394 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.234910 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55c86457d7-49fs6"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.235549 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55c86457d7-49fs6" event={"ID":"60783181-9776-4dc3-a332-d558513d2c4d","Type":"ContainerDied","Data":"ec4d4dcfd1ea901198c0421d51b3933b05131259277178d1850648757357e8d8"}
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.298325 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" podStartSLOduration=3.298295541 podStartE2EDuration="3.298295541s" podCreationTimestamp="2025-11-25 10:12:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:12:15.254764724 +0000 UTC m=+4995.380794287" watchObservedRunningTime="2025-11-25 10:12:15.298295541 +0000 UTC m=+4995.424325104"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.373080 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55c86457d7-49fs6"]
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.390617 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55c86457d7-49fs6"]
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.412377 4932 scope.go:117] "RemoveContainer" containerID="ce1e1712048923b27b433d1e803db7a846f538b9bb81c64448524d94cc205cad"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.414797 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-866449bdb9-c87z9"]
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:15.423278 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-866449bdb9-c87z9"]
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.246349 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" event={"ID":"09f050dc-7e90-4377-a507-249f9b184e11","Type":"ContainerStarted","Data":"0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996"}
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.247072 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.247642 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1225a271-403f-4e91-b5b2-7b8a4aaae855","Type":"ContainerStarted","Data":"9209a3226db8a7a81291d49818a394c11eb0e104aff26c6678df72ec56ab3d70"}
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.249122 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"798d2263-61ba-4aa7-ba96-9971ee1080a8","Type":"ContainerStarted","Data":"6fa72973c64c22eb34370b5cfe31d2b3108f9dad36d2d61ad5c7febc8c2cf4f3"}
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.276937 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" podStartSLOduration=4.276912164 podStartE2EDuration="4.276912164s" podCreationTimestamp="2025-11-25 10:12:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:12:16.268094112 +0000 UTC m=+4996.394123675" watchObservedRunningTime="2025-11-25 10:12:16.276912164 +0000 UTC m=+4996.402941727"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.355709 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.357242 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.360743 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.362716 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.364454 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.364712 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-fr24g"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.368149 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.429643 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.515103 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/254c53f3-4702-4634-804b-00a97e09522f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.515185 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwdm8\" (UniqueName: \"kubernetes.io/projected/254c53f3-4702-4634-804b-00a97e09522f-kube-api-access-cwdm8\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.515330 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.515385 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.515408 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-534a475c-6cdd-4d7b-b182-55174e15a803\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.515435 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/254c53f3-4702-4634-804b-00a97e09522f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.515461 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254c53f3-4702-4634-804b-00a97e09522f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.515556 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.616921 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/254c53f3-4702-4634-804b-00a97e09522f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.616992 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwdm8\" (UniqueName: \"kubernetes.io/projected/254c53f3-4702-4634-804b-00a97e09522f-kube-api-access-cwdm8\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.617036 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.617088 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.617118 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-534a475c-6cdd-4d7b-b182-55174e15a803\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.617149 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/254c53f3-4702-4634-804b-00a97e09522f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.617176 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254c53f3-4702-4634-804b-00a97e09522f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.617211 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.617930 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/254c53f3-4702-4634-804b-00a97e09522f-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.618744 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.619397 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.619822 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.625119 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60783181-9776-4dc3-a332-d558513d2c4d" path="/var/lib/kubelet/pods/60783181-9776-4dc3-a332-d558513d2c4d/volumes"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.625113 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/254c53f3-4702-4634-804b-00a97e09522f-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.625888 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a97fa80e-68b6-4bce-8c1e-b07421735607" path="/var/lib/kubelet/pods/a97fa80e-68b6-4bce-8c1e-b07421735607/volumes"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.629132 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254c53f3-4702-4634-804b-00a97e09522f-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.630540 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set.
Skipping MountDevice... Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.630605 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-534a475c-6cdd-4d7b-b182-55174e15a803\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0a53af82a3a8b6e47bbd6ca4026c1baffb65a40b1b788c1024f8c1dad29f8e29/globalmount\"" pod="openstack/openstack-cell1-galera-0" Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.651648 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwdm8\" (UniqueName: \"kubernetes.io/projected/254c53f3-4702-4634-804b-00a97e09522f-kube-api-access-cwdm8\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0" Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.669867 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-534a475c-6cdd-4d7b-b182-55174e15a803\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803\") pod \"openstack-cell1-galera-0\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " pod="openstack/openstack-cell1-galera-0" Nov 25 10:12:16 crc kubenswrapper[4932]: I1125 10:12:16.751634 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 10:12:17 crc kubenswrapper[4932]: I1125 10:12:17.268780 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2c7a043a-8744-4406-9453-52aa0742cac0","Type":"ContainerStarted","Data":"8bae2c6e2063eafc6517aa45233588d62c4761a5368109a0961296e7633e5436"} Nov 25 10:12:17 crc kubenswrapper[4932]: I1125 10:12:17.269148 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2c7a043a-8744-4406-9453-52aa0742cac0","Type":"ContainerStarted","Data":"136f9720ea6535027202dbb064bab4db9eea31359186e31d1de853079682f0d7"} Nov 25 10:12:17 crc kubenswrapper[4932]: I1125 10:12:17.280159 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 10:12:17 crc kubenswrapper[4932]: W1125 10:12:17.290804 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod254c53f3_4702_4634_804b_00a97e09522f.slice/crio-2e07b8f28c4d204ed838be955f279af814ccb75062df42f2fdbcc23295489f86 WatchSource:0}: Error finding container 2e07b8f28c4d204ed838be955f279af814ccb75062df42f2fdbcc23295489f86: Status 404 returned error can't find the container with id 2e07b8f28c4d204ed838be955f279af814ccb75062df42f2fdbcc23295489f86 Nov 25 10:12:17 crc kubenswrapper[4932]: I1125 10:12:17.814494 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.276527 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"254c53f3-4702-4634-804b-00a97e09522f","Type":"ContainerStarted","Data":"d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849"} Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.276864 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"254c53f3-4702-4634-804b-00a97e09522f","Type":"ContainerStarted","Data":"2e07b8f28c4d204ed838be955f279af814ccb75062df42f2fdbcc23295489f86"} Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.517041 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.518361 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.521060 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-8h5xh" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.521674 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.522108 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.534267 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.554631 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvdzg\" (UniqueName: \"kubernetes.io/projected/ef7b84cb-850f-4119-876a-4887f1ba11a9-kube-api-access-cvdzg\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.554707 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ef7b84cb-850f-4119-876a-4887f1ba11a9-kolla-config\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.554769 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef7b84cb-850f-4119-876a-4887f1ba11a9-combined-ca-bundle\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.554827 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef7b84cb-850f-4119-876a-4887f1ba11a9-memcached-tls-certs\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.554995 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ef7b84cb-850f-4119-876a-4887f1ba11a9-config-data\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.656751 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ef7b84cb-850f-4119-876a-4887f1ba11a9-config-data\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.656843 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvdzg\" (UniqueName: 
\"kubernetes.io/projected/ef7b84cb-850f-4119-876a-4887f1ba11a9-kube-api-access-cvdzg\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.656877 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ef7b84cb-850f-4119-876a-4887f1ba11a9-kolla-config\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.656935 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef7b84cb-850f-4119-876a-4887f1ba11a9-combined-ca-bundle\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.656979 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef7b84cb-850f-4119-876a-4887f1ba11a9-memcached-tls-certs\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.658404 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ef7b84cb-850f-4119-876a-4887f1ba11a9-config-data\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.658413 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ef7b84cb-850f-4119-876a-4887f1ba11a9-kolla-config\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.667908 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/ef7b84cb-850f-4119-876a-4887f1ba11a9-memcached-tls-certs\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.672531 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef7b84cb-850f-4119-876a-4887f1ba11a9-combined-ca-bundle\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.676272 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvdzg\" (UniqueName: \"kubernetes.io/projected/ef7b84cb-850f-4119-876a-4887f1ba11a9-kube-api-access-cvdzg\") pod \"memcached-0\" (UID: \"ef7b84cb-850f-4119-876a-4887f1ba11a9\") " pod="openstack/memcached-0" Nov 25 10:12:18 crc kubenswrapper[4932]: I1125 10:12:18.851967 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 25 10:12:19 crc kubenswrapper[4932]: I1125 10:12:19.284030 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="254c53f3-4702-4634-804b-00a97e09522f" containerName="mysql-bootstrap" containerID="cri-o://d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849" gracePeriod=30 Nov 25 10:12:19 crc kubenswrapper[4932]: I1125 10:12:19.353922 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 10:12:20 crc kubenswrapper[4932]: I1125 10:12:20.292845 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"ef7b84cb-850f-4119-876a-4887f1ba11a9","Type":"ContainerStarted","Data":"3d9b6b142f96f8d32d03695bf2be88fd082ef9c79ce16fb7786964dea943d82c"} Nov 25 10:12:20 crc kubenswrapper[4932]: I1125 10:12:20.292926 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"ef7b84cb-850f-4119-876a-4887f1ba11a9","Type":"ContainerStarted","Data":"cd9bb0887d97c0717e2e4dc4522944686f395791e3b17265212b0410820d8ea0"} Nov 25 10:12:20 crc kubenswrapper[4932]: I1125 10:12:20.292966 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 25 10:12:20 crc kubenswrapper[4932]: I1125 10:12:20.318632 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.318615221 podStartE2EDuration="2.318615221s" podCreationTimestamp="2025-11-25 10:12:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:12:20.316951714 +0000 UTC m=+5000.442981287" watchObservedRunningTime="2025-11-25 10:12:20.318615221 +0000 UTC m=+5000.444644784" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.286717 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.321886 4932 generic.go:334] "Generic (PLEG): container finished" podID="2c7a043a-8744-4406-9453-52aa0742cac0" containerID="8bae2c6e2063eafc6517aa45233588d62c4761a5368109a0961296e7633e5436" exitCode=0 Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.322006 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2c7a043a-8744-4406-9453-52aa0742cac0","Type":"ContainerDied","Data":"8bae2c6e2063eafc6517aa45233588d62c4761a5368109a0961296e7633e5436"} Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.325855 4932 generic.go:334] "Generic (PLEG): container finished" podID="254c53f3-4702-4634-804b-00a97e09522f" containerID="d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849" exitCode=0 Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.326849 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"254c53f3-4702-4634-804b-00a97e09522f","Type":"ContainerDied","Data":"d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849"} Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.326899 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"254c53f3-4702-4634-804b-00a97e09522f","Type":"ContainerDied","Data":"2e07b8f28c4d204ed838be955f279af814ccb75062df42f2fdbcc23295489f86"} Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.326933 4932 scope.go:117] "RemoveContainer" containerID="d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.326934 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.361310 4932 scope.go:117] "RemoveContainer" containerID="d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849" Nov 25 10:12:21 crc kubenswrapper[4932]: E1125 10:12:21.364362 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849\": container with ID starting with d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849 not found: ID does not exist" containerID="d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.364412 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849"} err="failed to get container status \"d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849\": rpc error: code = NotFound desc = could not find container \"d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849\": container with ID starting with d82116e28892ec7d4ab2cd846d484e32632cd02760627ed9f70ae5bb48d17849 not found: ID does not exist" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.415838 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803\") pod \"254c53f3-4702-4634-804b-00a97e09522f\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.415921 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-config-data-default\") pod \"254c53f3-4702-4634-804b-00a97e09522f\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.415996 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwdm8\" (UniqueName: \"kubernetes.io/projected/254c53f3-4702-4634-804b-00a97e09522f-kube-api-access-cwdm8\") pod \"254c53f3-4702-4634-804b-00a97e09522f\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.416106 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-operator-scripts\") pod \"254c53f3-4702-4634-804b-00a97e09522f\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.416144 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/254c53f3-4702-4634-804b-00a97e09522f-galera-tls-certs\") pod \"254c53f3-4702-4634-804b-00a97e09522f\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.416238 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254c53f3-4702-4634-804b-00a97e09522f-combined-ca-bundle\") pod \"254c53f3-4702-4634-804b-00a97e09522f\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.416332 4932 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-kolla-config\") pod \"254c53f3-4702-4634-804b-00a97e09522f\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.416440 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/254c53f3-4702-4634-804b-00a97e09522f-config-data-generated\") pod \"254c53f3-4702-4634-804b-00a97e09522f\" (UID: \"254c53f3-4702-4634-804b-00a97e09522f\") " Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.419007 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "254c53f3-4702-4634-804b-00a97e09522f" (UID: "254c53f3-4702-4634-804b-00a97e09522f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.420306 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "254c53f3-4702-4634-804b-00a97e09522f" (UID: "254c53f3-4702-4634-804b-00a97e09522f"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.420762 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "254c53f3-4702-4634-804b-00a97e09522f" (UID: "254c53f3-4702-4634-804b-00a97e09522f"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.420974 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/254c53f3-4702-4634-804b-00a97e09522f-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "254c53f3-4702-4634-804b-00a97e09522f" (UID: "254c53f3-4702-4634-804b-00a97e09522f"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.423926 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/254c53f3-4702-4634-804b-00a97e09522f-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "254c53f3-4702-4634-804b-00a97e09522f" (UID: "254c53f3-4702-4634-804b-00a97e09522f"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.425413 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/254c53f3-4702-4634-804b-00a97e09522f-kube-api-access-cwdm8" (OuterVolumeSpecName: "kube-api-access-cwdm8") pod "254c53f3-4702-4634-804b-00a97e09522f" (UID: "254c53f3-4702-4634-804b-00a97e09522f"). InnerVolumeSpecName "kube-api-access-cwdm8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.428171 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/254c53f3-4702-4634-804b-00a97e09522f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "254c53f3-4702-4634-804b-00a97e09522f" (UID: "254c53f3-4702-4634-804b-00a97e09522f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.439117 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803" (OuterVolumeSpecName: "mysql-db") pod "254c53f3-4702-4634-804b-00a97e09522f" (UID: "254c53f3-4702-4634-804b-00a97e09522f"). InnerVolumeSpecName "pvc-534a475c-6cdd-4d7b-b182-55174e15a803". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.519048 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/254c53f3-4702-4634-804b-00a97e09522f-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.519204 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-534a475c-6cdd-4d7b-b182-55174e15a803\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803\") on node \"crc\" " Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.519229 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.519244 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwdm8\" (UniqueName: \"kubernetes.io/projected/254c53f3-4702-4634-804b-00a97e09522f-kube-api-access-cwdm8\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.519257 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.519268 4932 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/254c53f3-4702-4634-804b-00a97e09522f-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.519279 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/254c53f3-4702-4634-804b-00a97e09522f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.519289 4932 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/254c53f3-4702-4634-804b-00a97e09522f-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.541673 4932 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.541933 4932 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-534a475c-6cdd-4d7b-b182-55174e15a803" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803") on node "crc"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.620897 4932 reconciler_common.go:293] "Volume detached for volume \"pvc-534a475c-6cdd-4d7b-b182-55174e15a803\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803\") on node \"crc\" DevicePath \"\""
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.715104 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.723333 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.757497 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 10:12:21 crc kubenswrapper[4932]: E1125 10:12:21.757849 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="254c53f3-4702-4634-804b-00a97e09522f" containerName="mysql-bootstrap"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.757868 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="254c53f3-4702-4634-804b-00a97e09522f" containerName="mysql-bootstrap"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.758007 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="254c53f3-4702-4634-804b-00a97e09522f" containerName="mysql-bootstrap"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.758866 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.767763 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.767792 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.768072 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-fr24g"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.768609 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.781136 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.828788 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3fe647fc-e13b-40b7-8368-8d2348e5305e-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.828883 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3fe647fc-e13b-40b7-8368-8d2348e5305e-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.828937 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-534a475c-6cdd-4d7b-b182-55174e15a803\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.828980 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9l2f\" (UniqueName: \"kubernetes.io/projected/3fe647fc-e13b-40b7-8368-8d2348e5305e-kube-api-access-s9l2f\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.829014 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3fe647fc-e13b-40b7-8368-8d2348e5305e-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.829059 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3fe647fc-e13b-40b7-8368-8d2348e5305e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.829083 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe647fc-e13b-40b7-8368-8d2348e5305e-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.829133 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3fe647fc-e13b-40b7-8368-8d2348e5305e-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.931122 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3fe647fc-e13b-40b7-8368-8d2348e5305e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.931200 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe647fc-e13b-40b7-8368-8d2348e5305e-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.931316 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3fe647fc-e13b-40b7-8368-8d2348e5305e-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.931371 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3fe647fc-e13b-40b7-8368-8d2348e5305e-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.931476 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3fe647fc-e13b-40b7-8368-8d2348e5305e-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.932383 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/3fe647fc-e13b-40b7-8368-8d2348e5305e-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.932615 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/3fe647fc-e13b-40b7-8368-8d2348e5305e-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.933055 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3fe647fc-e13b-40b7-8368-8d2348e5305e-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.933304 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-534a475c-6cdd-4d7b-b182-55174e15a803\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.933410 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9l2f\" (UniqueName: \"kubernetes.io/projected/3fe647fc-e13b-40b7-8368-8d2348e5305e-kube-api-access-s9l2f\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.933818 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3fe647fc-e13b-40b7-8368-8d2348e5305e-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.934096 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/3fe647fc-e13b-40b7-8368-8d2348e5305e-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.935793 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.935838 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-534a475c-6cdd-4d7b-b182-55174e15a803\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/0a53af82a3a8b6e47bbd6ca4026c1baffb65a40b1b788c1024f8c1dad29f8e29/globalmount\"" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.936064 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/3fe647fc-e13b-40b7-8368-8d2348e5305e-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.936401 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fe647fc-e13b-40b7-8368-8d2348e5305e-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.952069 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9l2f\" (UniqueName: \"kubernetes.io/projected/3fe647fc-e13b-40b7-8368-8d2348e5305e-kube-api-access-s9l2f\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:21 crc kubenswrapper[4932]: I1125 10:12:21.962917 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-534a475c-6cdd-4d7b-b182-55174e15a803\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-534a475c-6cdd-4d7b-b182-55174e15a803\") pod \"openstack-cell1-galera-0\" (UID: \"3fe647fc-e13b-40b7-8368-8d2348e5305e\") " pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:22 crc kubenswrapper[4932]: I1125 10:12:22.083422 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:22 crc kubenswrapper[4932]: I1125 10:12:22.351919 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"2c7a043a-8744-4406-9453-52aa0742cac0","Type":"ContainerStarted","Data":"195c70d866120e892ebd7f4524298bb8b44301e3bb400a983bd1553c97338ab1"}
Nov 25 10:12:22 crc kubenswrapper[4932]: I1125 10:12:22.385236 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=9.385176589 podStartE2EDuration="9.385176589s" podCreationTimestamp="2025-11-25 10:12:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:12:22.38380739 +0000 UTC m=+5002.509836963" watchObservedRunningTime="2025-11-25 10:12:22.385176589 +0000 UTC m=+5002.511206162"
Nov 25 10:12:22 crc kubenswrapper[4932]: I1125 10:12:22.614993 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="254c53f3-4702-4634-804b-00a97e09522f" path="/var/lib/kubelet/pods/254c53f3-4702-4634-804b-00a97e09522f/volumes"
Nov 25 10:12:22 crc kubenswrapper[4932]: W1125 10:12:22.673542 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3fe647fc_e13b_40b7_8368_8d2348e5305e.slice/crio-128c272c2a17184cd2626e15713570e879cd0957f93cd9e1c2bace5effd9e579 WatchSource:0}: Error finding container 128c272c2a17184cd2626e15713570e879cd0957f93cd9e1c2bace5effd9e579: Status 404 returned error can't find the container with id 128c272c2a17184cd2626e15713570e879cd0957f93cd9e1c2bace5effd9e579
Nov 25 10:12:22 crc kubenswrapper[4932]: I1125 10:12:22.675740 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 10:12:22 crc kubenswrapper[4932]: I1125 10:12:22.826005 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5"
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.183344 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8"
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.247930 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f4c6c447c-wzsh5"]
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.361713 4932 generic.go:334] "Generic (PLEG): container finished" podID="3fe647fc-e13b-40b7-8368-8d2348e5305e" containerID="9847d8ffcf3b57cea15ba80f4b3c41585ed28d5c9502f405b04bb54b5e23ac3b" exitCode=0
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.361812 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3fe647fc-e13b-40b7-8368-8d2348e5305e","Type":"ContainerDied","Data":"9847d8ffcf3b57cea15ba80f4b3c41585ed28d5c9502f405b04bb54b5e23ac3b"}
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.361862 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3fe647fc-e13b-40b7-8368-8d2348e5305e","Type":"ContainerStarted","Data":"128c272c2a17184cd2626e15713570e879cd0957f93cd9e1c2bace5effd9e579"}
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.362037 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" podUID="09f050dc-7e90-4377-a507-249f9b184e11" containerName="dnsmasq-dns" containerID="cri-o://0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996" gracePeriod=10
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.743325 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5"
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.867228 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09f050dc-7e90-4377-a507-249f9b184e11-dns-svc\") pod \"09f050dc-7e90-4377-a507-249f9b184e11\" (UID: \"09f050dc-7e90-4377-a507-249f9b184e11\") "
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.867314 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09f050dc-7e90-4377-a507-249f9b184e11-config\") pod \"09f050dc-7e90-4377-a507-249f9b184e11\" (UID: \"09f050dc-7e90-4377-a507-249f9b184e11\") "
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.867437 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t84m4\" (UniqueName: \"kubernetes.io/projected/09f050dc-7e90-4377-a507-249f9b184e11-kube-api-access-t84m4\") pod \"09f050dc-7e90-4377-a507-249f9b184e11\" (UID: \"09f050dc-7e90-4377-a507-249f9b184e11\") "
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.873722 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09f050dc-7e90-4377-a507-249f9b184e11-kube-api-access-t84m4" (OuterVolumeSpecName: "kube-api-access-t84m4") pod "09f050dc-7e90-4377-a507-249f9b184e11" (UID: "09f050dc-7e90-4377-a507-249f9b184e11"). InnerVolumeSpecName "kube-api-access-t84m4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.901306 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09f050dc-7e90-4377-a507-249f9b184e11-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "09f050dc-7e90-4377-a507-249f9b184e11" (UID: "09f050dc-7e90-4377-a507-249f9b184e11"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.906843 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09f050dc-7e90-4377-a507-249f9b184e11-config" (OuterVolumeSpecName: "config") pod "09f050dc-7e90-4377-a507-249f9b184e11" (UID: "09f050dc-7e90-4377-a507-249f9b184e11"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.971515 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/09f050dc-7e90-4377-a507-249f9b184e11-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.971563 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09f050dc-7e90-4377-a507-249f9b184e11-config\") on node \"crc\" DevicePath \"\""
Nov 25 10:12:23 crc kubenswrapper[4932]: I1125 10:12:23.971577 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t84m4\" (UniqueName: \"kubernetes.io/projected/09f050dc-7e90-4377-a507-249f9b184e11-kube-api-access-t84m4\") on node \"crc\" DevicePath \"\""
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.372791 4932 generic.go:334] "Generic (PLEG): container finished" podID="09f050dc-7e90-4377-a507-249f9b184e11" containerID="0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996" exitCode=0
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.372848 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5"
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.372891 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" event={"ID":"09f050dc-7e90-4377-a507-249f9b184e11","Type":"ContainerDied","Data":"0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996"}
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.372945 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4c6c447c-wzsh5" event={"ID":"09f050dc-7e90-4377-a507-249f9b184e11","Type":"ContainerDied","Data":"d8b7f4c20efb24adbc66930dd026503b4fd7e6eae94d3385dd2c30317dbdfe7a"}
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.372972 4932 scope.go:117] "RemoveContainer" containerID="0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996"
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.377394 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"3fe647fc-e13b-40b7-8368-8d2348e5305e","Type":"ContainerStarted","Data":"0a03ff4a4392dab3c0f0e85002f51294ae8644d59d5dc8c1b604373ffe52eefe"}
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.400316 4932 scope.go:117] "RemoveContainer" containerID="34c68bec8e239a5264783efb9e50b7fc04588788915374afcee04c93bf0a349b"
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.423431 4932 scope.go:117] "RemoveContainer" containerID="0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996"
Nov 25 10:12:24 crc kubenswrapper[4932]: E1125 10:12:24.424013 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996\": container with ID starting with 0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996 not found: ID does not exist" containerID="0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996"
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.424045 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996"} err="failed to get container status \"0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996\": rpc error: code = NotFound desc = could not find container \"0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996\": container with ID starting with 0d3d0953fbe60f71e5478e49d0b1e1580edaf6e6a70f8cd145697b77a5f79996 not found: ID does not exist"
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.424065 4932 scope.go:117] "RemoveContainer" containerID="34c68bec8e239a5264783efb9e50b7fc04588788915374afcee04c93bf0a349b"
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.428379 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=3.428362638 podStartE2EDuration="3.428362638s" podCreationTimestamp="2025-11-25 10:12:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:12:24.424076355 +0000 UTC m=+5004.550105928" watchObservedRunningTime="2025-11-25 10:12:24.428362638 +0000 UTC m=+5004.554392211"
Nov 25 10:12:24 crc kubenswrapper[4932]: E1125 10:12:24.429113 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34c68bec8e239a5264783efb9e50b7fc04588788915374afcee04c93bf0a349b\": container with ID starting with 34c68bec8e239a5264783efb9e50b7fc04588788915374afcee04c93bf0a349b not found: ID does not exist" containerID="34c68bec8e239a5264783efb9e50b7fc04588788915374afcee04c93bf0a349b"
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.429153 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34c68bec8e239a5264783efb9e50b7fc04588788915374afcee04c93bf0a349b"} err="failed to get container status \"34c68bec8e239a5264783efb9e50b7fc04588788915374afcee04c93bf0a349b\": rpc error: code = NotFound desc = could not find container \"34c68bec8e239a5264783efb9e50b7fc04588788915374afcee04c93bf0a349b\": container with ID starting with 34c68bec8e239a5264783efb9e50b7fc04588788915374afcee04c93bf0a349b not found: ID does not exist"
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.453685 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f4c6c447c-wzsh5"]
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.461115 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f4c6c447c-wzsh5"]
Nov 25 10:12:24 crc kubenswrapper[4932]: I1125 10:12:24.617137 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09f050dc-7e90-4377-a507-249f9b184e11" path="/var/lib/kubelet/pods/09f050dc-7e90-4377-a507-249f9b184e11/volumes"
Nov 25 10:12:25 crc kubenswrapper[4932]: I1125 10:12:25.077690 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 25 10:12:25 crc kubenswrapper[4932]: I1125 10:12:25.077750 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 25 10:12:27 crc kubenswrapper[4932]: I1125 10:12:27.276563 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Nov 25 10:12:27 crc kubenswrapper[4932]: I1125 10:12:27.369985 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Nov 25 10:12:28 crc kubenswrapper[4932]: I1125 10:12:28.853321 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Nov 25 10:12:32 crc kubenswrapper[4932]: I1125 10:12:32.084175 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:32 crc kubenswrapper[4932]: I1125 10:12:32.084510 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:32 crc kubenswrapper[4932]: I1125 10:12:32.216713 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:32 crc kubenswrapper[4932]: I1125 10:12:32.527977 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Nov 25 10:12:37 crc kubenswrapper[4932]: I1125 10:12:37.180898 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:12:37 crc kubenswrapper[4932]: I1125 10:12:37.182659 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:12:37 crc kubenswrapper[4932]: I1125 10:12:37.182812 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh"
Nov 25 10:12:37 crc kubenswrapper[4932]: I1125 10:12:37.183649 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 10:12:37 crc kubenswrapper[4932]: I1125 10:12:37.183784 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" gracePeriod=600
Nov 25 10:12:37 crc kubenswrapper[4932]: E1125 10:12:37.309563 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:12:37 crc kubenswrapper[4932]: I1125 10:12:37.488171 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" exitCode=0
Nov 25 10:12:37 crc kubenswrapper[4932]: I1125 10:12:37.488234 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"}
Nov 25 10:12:37 crc kubenswrapper[4932]: I1125 10:12:37.488604 4932 scope.go:117] "RemoveContainer" containerID="b1eccdea28624af1d89e99001e8aa1973651621a0b0dbb3c72f710d48119bf2d"
Nov 25 10:12:37 crc kubenswrapper[4932]: I1125 10:12:37.489481 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:12:37 crc kubenswrapper[4932]: E1125 10:12:37.489977 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:12:48 crc kubenswrapper[4932]: I1125 10:12:48.592134 4932 generic.go:334] "Generic (PLEG): container finished" podID="798d2263-61ba-4aa7-ba96-9971ee1080a8" containerID="6fa72973c64c22eb34370b5cfe31d2b3108f9dad36d2d61ad5c7febc8c2cf4f3" exitCode=0
Nov 25 10:12:48 crc kubenswrapper[4932]: I1125 10:12:48.592313 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"798d2263-61ba-4aa7-ba96-9971ee1080a8","Type":"ContainerDied","Data":"6fa72973c64c22eb34370b5cfe31d2b3108f9dad36d2d61ad5c7febc8c2cf4f3"}
Nov 25 10:12:48 crc kubenswrapper[4932]: I1125 10:12:48.596677 4932 generic.go:334] "Generic (PLEG): container finished" podID="1225a271-403f-4e91-b5b2-7b8a4aaae855" containerID="9209a3226db8a7a81291d49818a394c11eb0e104aff26c6678df72ec56ab3d70" exitCode=0
Nov 25 10:12:48 crc kubenswrapper[4932]: I1125 10:12:48.596742 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1225a271-403f-4e91-b5b2-7b8a4aaae855","Type":"ContainerDied","Data":"9209a3226db8a7a81291d49818a394c11eb0e104aff26c6678df72ec56ab3d70"}
Nov 25 10:12:49 crc kubenswrapper[4932]: I1125 10:12:49.608343 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1225a271-403f-4e91-b5b2-7b8a4aaae855","Type":"ContainerStarted","Data":"43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32"}
Nov 25 10:12:49 crc kubenswrapper[4932]: I1125 10:12:49.609237 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 10:12:49 crc kubenswrapper[4932]: I1125 10:12:49.610916 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"798d2263-61ba-4aa7-ba96-9971ee1080a8","Type":"ContainerStarted","Data":"7dba1df34f66955c81326ceaa81bbf678abc5b43713788203fc8f022e0eb56e7"}
Nov 25 10:12:49 crc kubenswrapper[4932]: I1125 10:12:49.611496 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Nov 25 10:12:49 crc kubenswrapper[4932]: I1125 10:12:49.647279 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.647257828 podStartE2EDuration="37.647257828s" podCreationTimestamp="2025-11-25 10:12:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:12:49.645416185 +0000 UTC m=+5029.771445758" watchObservedRunningTime="2025-11-25 10:12:49.647257828 +0000 UTC m=+5029.773287391"
Nov 25 10:12:50 crc kubenswrapper[4932]: I1125 10:12:50.615986 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:12:50 crc kubenswrapper[4932]: E1125 10:12:50.616384 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:13:03 crc kubenswrapper[4932]: I1125 10:13:03.940368 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 10:13:03 crc kubenswrapper[4932]: I1125 10:13:03.984097 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=51.984067995 podStartE2EDuration="51.984067995s" podCreationTimestamp="2025-11-25 10:12:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:12:49.676579828 +0000 UTC m=+5029.802609411" watchObservedRunningTime="2025-11-25 10:13:03.984067995 +0000 UTC m=+5044.110097558"
Nov 25 10:13:04 crc kubenswrapper[4932]: I1125 10:13:04.381423 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Nov 25 10:13:04 crc kubenswrapper[4932]: I1125 10:13:04.606697 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:13:04 crc kubenswrapper[4932]: E1125 10:13:04.607143 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:13:08 crc kubenswrapper[4932]: I1125 10:13:08.843039 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54564445dc-vjls5"]
Nov 25 10:13:08 crc kubenswrapper[4932]: E1125 10:13:08.843828 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09f050dc-7e90-4377-a507-249f9b184e11" containerName="init"
Nov 25 10:13:08 crc kubenswrapper[4932]: I1125 10:13:08.843845 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="09f050dc-7e90-4377-a507-249f9b184e11" containerName="init"
Nov 25 10:13:08 crc kubenswrapper[4932]: E1125 10:13:08.843883 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09f050dc-7e90-4377-a507-249f9b184e11" containerName="dnsmasq-dns"
Nov 25 10:13:08 crc kubenswrapper[4932]: I1125 10:13:08.843892 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="09f050dc-7e90-4377-a507-249f9b184e11" containerName="dnsmasq-dns"
Nov 25 10:13:08 crc kubenswrapper[4932]: I1125 10:13:08.844213 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="09f050dc-7e90-4377-a507-249f9b184e11" containerName="dnsmasq-dns"
Nov 25 10:13:08 crc kubenswrapper[4932]: I1125 10:13:08.845241 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:08 crc kubenswrapper[4932]: I1125 10:13:08.858784 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54564445dc-vjls5"]
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.020429 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xr9sb\" (UniqueName: \"kubernetes.io/projected/cce306ed-cc41-4903-ad78-edc818f19fdc-kube-api-access-xr9sb\") pod \"dnsmasq-dns-54564445dc-vjls5\" (UID: \"cce306ed-cc41-4903-ad78-edc818f19fdc\") " pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.020665 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cce306ed-cc41-4903-ad78-edc818f19fdc-config\") pod \"dnsmasq-dns-54564445dc-vjls5\" (UID: \"cce306ed-cc41-4903-ad78-edc818f19fdc\") " pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.020762 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cce306ed-cc41-4903-ad78-edc818f19fdc-dns-svc\") pod \"dnsmasq-dns-54564445dc-vjls5\" (UID: \"cce306ed-cc41-4903-ad78-edc818f19fdc\") " pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.122405 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cce306ed-cc41-4903-ad78-edc818f19fdc-config\") pod \"dnsmasq-dns-54564445dc-vjls5\" (UID: \"cce306ed-cc41-4903-ad78-edc818f19fdc\") " pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.122696 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cce306ed-cc41-4903-ad78-edc818f19fdc-dns-svc\") pod \"dnsmasq-dns-54564445dc-vjls5\" (UID: \"cce306ed-cc41-4903-ad78-edc818f19fdc\") " pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.122724 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xr9sb\" (UniqueName: \"kubernetes.io/projected/cce306ed-cc41-4903-ad78-edc818f19fdc-kube-api-access-xr9sb\") pod \"dnsmasq-dns-54564445dc-vjls5\" (UID: \"cce306ed-cc41-4903-ad78-edc818f19fdc\") " pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.124034 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cce306ed-cc41-4903-ad78-edc818f19fdc-dns-svc\") pod \"dnsmasq-dns-54564445dc-vjls5\" (UID: \"cce306ed-cc41-4903-ad78-edc818f19fdc\") " pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.124088 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cce306ed-cc41-4903-ad78-edc818f19fdc-config\") pod \"dnsmasq-dns-54564445dc-vjls5\" (UID: \"cce306ed-cc41-4903-ad78-edc818f19fdc\") " pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.145040 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xr9sb\" (UniqueName: \"kubernetes.io/projected/cce306ed-cc41-4903-ad78-edc818f19fdc-kube-api-access-xr9sb\") pod \"dnsmasq-dns-54564445dc-vjls5\" (UID: \"cce306ed-cc41-4903-ad78-edc818f19fdc\") " pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.172232 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.616624 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.669760 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54564445dc-vjls5"]
Nov 25 10:13:09 crc kubenswrapper[4932]: I1125 10:13:09.787971 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54564445dc-vjls5" event={"ID":"cce306ed-cc41-4903-ad78-edc818f19fdc","Type":"ContainerStarted","Data":"9532310b208e34bffc59166b263eaa55f64969ec7e6cd2e094c2bca2cb8bcf9f"}
Nov 25 10:13:10 crc kubenswrapper[4932]: I1125 10:13:10.399940 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 25 10:13:10 crc kubenswrapper[4932]: I1125 10:13:10.799290 4932 generic.go:334] "Generic (PLEG): container finished" podID="cce306ed-cc41-4903-ad78-edc818f19fdc" containerID="39e91743d461a371594029d395bc50224fadf8be8f11dde72c29f5466fe2256c" exitCode=0
Nov 25 10:13:10 crc kubenswrapper[4932]: I1125 10:13:10.799379 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54564445dc-vjls5" event={"ID":"cce306ed-cc41-4903-ad78-edc818f19fdc","Type":"ContainerDied","Data":"39e91743d461a371594029d395bc50224fadf8be8f11dde72c29f5466fe2256c"}
Nov 25 10:13:11 crc kubenswrapper[4932]: I1125 10:13:11.810171 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54564445dc-vjls5" event={"ID":"cce306ed-cc41-4903-ad78-edc818f19fdc","Type":"ContainerStarted","Data":"8bdbec87ec2659240016debd2e622b873371c12192fa78f97f9d4b28b7481223"}
Nov 25 10:13:11 crc kubenswrapper[4932]: I1125 10:13:11.810672 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:11 crc kubenswrapper[4932]: I1125 10:13:11.840031 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54564445dc-vjls5" podStartSLOduration=3.840004371 podStartE2EDuration="3.840004371s" podCreationTimestamp="2025-11-25 10:13:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:13:11.830618252 +0000 UTC m=+5051.956647855" watchObservedRunningTime="2025-11-25 10:13:11.840004371 +0000 UTC m=+5051.966033944"
Nov 25 10:13:13 crc kubenswrapper[4932]: I1125 10:13:13.612672 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="798d2263-61ba-4aa7-ba96-9971ee1080a8" containerName="rabbitmq" containerID="cri-o://7dba1df34f66955c81326ceaa81bbf678abc5b43713788203fc8f022e0eb56e7" gracePeriod=604797
Nov 25 10:13:14 crc kubenswrapper[4932]: I1125 10:13:14.379842 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="798d2263-61ba-4aa7-ba96-9971ee1080a8" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.249:5671: connect: connection refused"
Nov 25 10:13:14 crc kubenswrapper[4932]: I1125 10:13:14.739624 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="1225a271-403f-4e91-b5b2-7b8a4aaae855" containerName="rabbitmq" containerID="cri-o://43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32" gracePeriod=604796
Nov 25 10:13:17 crc kubenswrapper[4932]: I1125 10:13:17.607723 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:13:17 crc kubenswrapper[4932]: E1125 10:13:17.608702 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.173360 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-54564445dc-vjls5"
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.241250 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59c6c64b5c-7l4p8"]
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.241572 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" podUID="35672445-3ce5-46a4-ab4c-d374506e1c6a" containerName="dnsmasq-dns" containerID="cri-o://99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf" gracePeriod=10
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.636793 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8"
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.722619 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35672445-3ce5-46a4-ab4c-d374506e1c6a-dns-svc\") pod \"35672445-3ce5-46a4-ab4c-d374506e1c6a\" (UID: \"35672445-3ce5-46a4-ab4c-d374506e1c6a\") "
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.722793 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8ll8\" (UniqueName: \"kubernetes.io/projected/35672445-3ce5-46a4-ab4c-d374506e1c6a-kube-api-access-z8ll8\") pod \"35672445-3ce5-46a4-ab4c-d374506e1c6a\" (UID: \"35672445-3ce5-46a4-ab4c-d374506e1c6a\") "
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.722838 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35672445-3ce5-46a4-ab4c-d374506e1c6a-config\") pod \"35672445-3ce5-46a4-ab4c-d374506e1c6a\" (UID: \"35672445-3ce5-46a4-ab4c-d374506e1c6a\") "
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.734487 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35672445-3ce5-46a4-ab4c-d374506e1c6a-kube-api-access-z8ll8" (OuterVolumeSpecName: "kube-api-access-z8ll8") pod "35672445-3ce5-46a4-ab4c-d374506e1c6a" (UID: "35672445-3ce5-46a4-ab4c-d374506e1c6a"). InnerVolumeSpecName "kube-api-access-z8ll8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.758530 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35672445-3ce5-46a4-ab4c-d374506e1c6a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "35672445-3ce5-46a4-ab4c-d374506e1c6a" (UID: "35672445-3ce5-46a4-ab4c-d374506e1c6a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.785583 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35672445-3ce5-46a4-ab4c-d374506e1c6a-config" (OuterVolumeSpecName: "config") pod "35672445-3ce5-46a4-ab4c-d374506e1c6a" (UID: "35672445-3ce5-46a4-ab4c-d374506e1c6a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.825319 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8ll8\" (UniqueName: \"kubernetes.io/projected/35672445-3ce5-46a4-ab4c-d374506e1c6a-kube-api-access-z8ll8\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.825363 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/35672445-3ce5-46a4-ab4c-d374506e1c6a-config\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.825373 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/35672445-3ce5-46a4-ab4c-d374506e1c6a-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.881166 4932 generic.go:334] "Generic (PLEG): container finished" podID="35672445-3ce5-46a4-ab4c-d374506e1c6a" containerID="99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf" exitCode=0
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.881277 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" event={"ID":"35672445-3ce5-46a4-ab4c-d374506e1c6a","Type":"ContainerDied","Data":"99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf"}
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.881316 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8" event={"ID":"35672445-3ce5-46a4-ab4c-d374506e1c6a","Type":"ContainerDied","Data":"20651ac3531b53c9aa43b667e86caa27826e7ad41f91392fc372863ee55bea72"}
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.881342 4932 scope.go:117] "RemoveContainer" containerID="99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf"
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.881491 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59c6c64b5c-7l4p8"
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.884861 4932 generic.go:334] "Generic (PLEG): container finished" podID="798d2263-61ba-4aa7-ba96-9971ee1080a8" containerID="7dba1df34f66955c81326ceaa81bbf678abc5b43713788203fc8f022e0eb56e7" exitCode=0
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.884887 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"798d2263-61ba-4aa7-ba96-9971ee1080a8","Type":"ContainerDied","Data":"7dba1df34f66955c81326ceaa81bbf678abc5b43713788203fc8f022e0eb56e7"}
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.905612 4932 scope.go:117] "RemoveContainer" containerID="687a3f755d8b79f5a12bdf00666684261ac455d4122e2ecd18d477f4d4e8996e"
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.918990 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59c6c64b5c-7l4p8"]
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.923368 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59c6c64b5c-7l4p8"]
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.957613 4932 scope.go:117] "RemoveContainer" containerID="99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf"
Nov 25 10:13:19 crc kubenswrapper[4932]: E1125 10:13:19.960051 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf\": container with ID starting with 99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf not found: ID does not exist" containerID="99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf"
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.960148 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf"} err="failed to get container status \"99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf\": rpc error: code = NotFound desc = could not find container \"99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf\": container with ID starting with 99cf79829e3ba04303d2d7d11c14daf4017ab3535726db2f8db54f862f2089bf not found: ID does not exist"
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.960225 4932 scope.go:117] "RemoveContainer" containerID="687a3f755d8b79f5a12bdf00666684261ac455d4122e2ecd18d477f4d4e8996e"
Nov 25 10:13:19 crc kubenswrapper[4932]: E1125 10:13:19.960654 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"687a3f755d8b79f5a12bdf00666684261ac455d4122e2ecd18d477f4d4e8996e\": container with ID starting with 687a3f755d8b79f5a12bdf00666684261ac455d4122e2ecd18d477f4d4e8996e not found: ID does not exist" containerID="687a3f755d8b79f5a12bdf00666684261ac455d4122e2ecd18d477f4d4e8996e"
Nov 25 10:13:19 crc kubenswrapper[4932]: I1125 10:13:19.960705 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"687a3f755d8b79f5a12bdf00666684261ac455d4122e2ecd18d477f4d4e8996e"} err="failed to get container status \"687a3f755d8b79f5a12bdf00666684261ac455d4122e2ecd18d477f4d4e8996e\": rpc error: code = NotFound desc = could not find container \"687a3f755d8b79f5a12bdf00666684261ac455d4122e2ecd18d477f4d4e8996e\": container with ID starting with 687a3f755d8b79f5a12bdf00666684261ac455d4122e2ecd18d477f4d4e8996e not found: ID does not exist"
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.128563 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.232015 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-tls\") pod \"798d2263-61ba-4aa7-ba96-9971ee1080a8\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") "
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.232084 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lslnn\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-kube-api-access-lslnn\") pod \"798d2263-61ba-4aa7-ba96-9971ee1080a8\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") "
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.232119 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/798d2263-61ba-4aa7-ba96-9971ee1080a8-erlang-cookie-secret\") pod \"798d2263-61ba-4aa7-ba96-9971ee1080a8\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") "
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.232169 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-plugins-conf\") pod \"798d2263-61ba-4aa7-ba96-9971ee1080a8\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") "
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.232293 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-plugins\") pod \"798d2263-61ba-4aa7-ba96-9971ee1080a8\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") "
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.232314 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/798d2263-61ba-4aa7-ba96-9971ee1080a8-pod-info\") pod \"798d2263-61ba-4aa7-ba96-9971ee1080a8\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") "
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.232332 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-erlang-cookie\") pod \"798d2263-61ba-4aa7-ba96-9971ee1080a8\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") "
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.232365 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-confd\") pod \"798d2263-61ba-4aa7-ba96-9971ee1080a8\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") "
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.232489 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\") pod \"798d2263-61ba-4aa7-ba96-9971ee1080a8\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") "
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.232515 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-server-conf\") pod \"798d2263-61ba-4aa7-ba96-9971ee1080a8\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") "
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.232542 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-config-data\") pod \"798d2263-61ba-4aa7-ba96-9971ee1080a8\" (UID: \"798d2263-61ba-4aa7-ba96-9971ee1080a8\") "
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.233705 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "798d2263-61ba-4aa7-ba96-9971ee1080a8" (UID: "798d2263-61ba-4aa7-ba96-9971ee1080a8"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.234149 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "798d2263-61ba-4aa7-ba96-9971ee1080a8" (UID: "798d2263-61ba-4aa7-ba96-9971ee1080a8"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.234269 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "798d2263-61ba-4aa7-ba96-9971ee1080a8" (UID: "798d2263-61ba-4aa7-ba96-9971ee1080a8"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.235362 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "798d2263-61ba-4aa7-ba96-9971ee1080a8" (UID: "798d2263-61ba-4aa7-ba96-9971ee1080a8"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.235725 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/798d2263-61ba-4aa7-ba96-9971ee1080a8-pod-info" (OuterVolumeSpecName: "pod-info") pod "798d2263-61ba-4aa7-ba96-9971ee1080a8" (UID: "798d2263-61ba-4aa7-ba96-9971ee1080a8"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.236410 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/798d2263-61ba-4aa7-ba96-9971ee1080a8-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "798d2263-61ba-4aa7-ba96-9971ee1080a8" (UID: "798d2263-61ba-4aa7-ba96-9971ee1080a8"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.237518 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-kube-api-access-lslnn" (OuterVolumeSpecName: "kube-api-access-lslnn") pod "798d2263-61ba-4aa7-ba96-9971ee1080a8" (UID: "798d2263-61ba-4aa7-ba96-9971ee1080a8"). InnerVolumeSpecName "kube-api-access-lslnn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.244174 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b" (OuterVolumeSpecName: "persistence") pod "798d2263-61ba-4aa7-ba96-9971ee1080a8" (UID: "798d2263-61ba-4aa7-ba96-9971ee1080a8"). InnerVolumeSpecName "pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.251985 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-config-data" (OuterVolumeSpecName: "config-data") pod "798d2263-61ba-4aa7-ba96-9971ee1080a8" (UID: "798d2263-61ba-4aa7-ba96-9971ee1080a8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.277499 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-server-conf" (OuterVolumeSpecName: "server-conf") pod "798d2263-61ba-4aa7-ba96-9971ee1080a8" (UID: "798d2263-61ba-4aa7-ba96-9971ee1080a8"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.306102 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "798d2263-61ba-4aa7-ba96-9971ee1080a8" (UID: "798d2263-61ba-4aa7-ba96-9971ee1080a8"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.334064 4932 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/798d2263-61ba-4aa7-ba96-9971ee1080a8-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.334278 4932 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-plugins-conf\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.334338 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.334390 4932 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/798d2263-61ba-4aa7-ba96-9971ee1080a8-pod-info\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.334441 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.334517 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.334602 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\") on node \"crc\" "
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.334661 4932 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-server-conf\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.334718 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/798d2263-61ba-4aa7-ba96-9971ee1080a8-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.334781 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.334862 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lslnn\" (UniqueName: \"kubernetes.io/projected/798d2263-61ba-4aa7-ba96-9971ee1080a8-kube-api-access-lslnn\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.367079 4932 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.367341 4932 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b") on node "crc"
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.435945 4932 reconciler_common.go:293] "Volume detached for volume \"pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\") on node \"crc\" DevicePath \"\""
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.616506 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35672445-3ce5-46a4-ab4c-d374506e1c6a" path="/var/lib/kubelet/pods/35672445-3ce5-46a4-ab4c-d374506e1c6a/volumes"
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.899053 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"798d2263-61ba-4aa7-ba96-9971ee1080a8","Type":"ContainerDied","Data":"0cfcbd72f74d41ed5f37ba6fd4513e685e87099bc92fbd0339715f9dddf4e44b"}
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.899369 4932 scope.go:117] "RemoveContainer" containerID="7dba1df34f66955c81326ceaa81bbf678abc5b43713788203fc8f022e0eb56e7"
Nov 25 10:13:20 crc kubenswrapper[4932]: I1125 10:13:20.899142 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.005880 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.011841 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.037760 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 10:13:21 crc kubenswrapper[4932]: E1125 10:13:21.038046 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35672445-3ce5-46a4-ab4c-d374506e1c6a" containerName="init"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.038058 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="35672445-3ce5-46a4-ab4c-d374506e1c6a" containerName="init"
Nov 25 10:13:21 crc kubenswrapper[4932]: E1125 10:13:21.038076 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="798d2263-61ba-4aa7-ba96-9971ee1080a8" containerName="setup-container"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.038082 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="798d2263-61ba-4aa7-ba96-9971ee1080a8" containerName="setup-container"
Nov 25 10:13:21 crc kubenswrapper[4932]: E1125 10:13:21.038094 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="798d2263-61ba-4aa7-ba96-9971ee1080a8" containerName="rabbitmq"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.038101 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="798d2263-61ba-4aa7-ba96-9971ee1080a8" containerName="rabbitmq"
Nov 25 10:13:21 crc kubenswrapper[4932]: E1125 10:13:21.038115 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35672445-3ce5-46a4-ab4c-d374506e1c6a" containerName="dnsmasq-dns"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.038121 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="35672445-3ce5-46a4-ab4c-d374506e1c6a" containerName="dnsmasq-dns"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.038273 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="35672445-3ce5-46a4-ab4c-d374506e1c6a" containerName="dnsmasq-dns"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.038282 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="798d2263-61ba-4aa7-ba96-9971ee1080a8" containerName="rabbitmq"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.039000 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.042100 4932 scope.go:117] "RemoveContainer" containerID="6fa72973c64c22eb34370b5cfe31d2b3108f9dad36d2d61ad5c7febc8c2cf4f3"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.042701 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.043248 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.043822 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.044552 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.045335 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.045390 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-wftr4"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.045804 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.052826 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.249679 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/43a4b55d-081c-4fa0-919e-ae7a526afd10-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.249776 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/43a4b55d-081c-4fa0-919e-ae7a526afd10-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.249857 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/43a4b55d-081c-4fa0-919e-ae7a526afd10-pod-info\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.249903 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/43a4b55d-081c-4fa0-919e-ae7a526afd10-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.250108 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/43a4b55d-081c-4fa0-919e-ae7a526afd10-config-data\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.250155 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/43a4b55d-081c-4fa0-919e-ae7a526afd10-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.250487 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ln5bc\" (UniqueName: \"kubernetes.io/projected/43a4b55d-081c-4fa0-919e-ae7a526afd10-kube-api-access-ln5bc\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.250546 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.250735 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/43a4b55d-081c-4fa0-919e-ae7a526afd10-server-conf\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.250818 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/43a4b55d-081c-4fa0-919e-ae7a526afd10-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.250994 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/43a4b55d-081c-4fa0-919e-ae7a526afd10-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.327128 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.352988 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ln5bc\" (UniqueName: \"kubernetes.io/projected/43a4b55d-081c-4fa0-919e-ae7a526afd10-kube-api-access-ln5bc\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.353052 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.353105 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/43a4b55d-081c-4fa0-919e-ae7a526afd10-server-conf\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.353141 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/43a4b55d-081c-4fa0-919e-ae7a526afd10-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.353204 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/43a4b55d-081c-4fa0-919e-ae7a526afd10-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.353227 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/43a4b55d-081c-4fa0-919e-ae7a526afd10-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.353252 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/43a4b55d-081c-4fa0-919e-ae7a526afd10-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.353281 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/43a4b55d-081c-4fa0-919e-ae7a526afd10-pod-info\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.353304 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/43a4b55d-081c-4fa0-919e-ae7a526afd10-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.353351 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/43a4b55d-081c-4fa0-919e-ae7a526afd10-config-data\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.353371 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/43a4b55d-081c-4fa0-919e-ae7a526afd10-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.356571 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/43a4b55d-081c-4fa0-919e-ae7a526afd10-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.357075 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/43a4b55d-081c-4fa0-919e-ae7a526afd10-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.357285 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.357321 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/7700a96a3d5894472790ba97f33bb0f7ed26f80d7ce59f029dac81014fa38d08/globalmount\"" pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.357533 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/43a4b55d-081c-4fa0-919e-ae7a526afd10-config-data\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.358668 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/43a4b55d-081c-4fa0-919e-ae7a526afd10-server-conf\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.359185 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/43a4b55d-081c-4fa0-919e-ae7a526afd10-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.361783 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/43a4b55d-081c-4fa0-919e-ae7a526afd10-pod-info\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.361864 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/43a4b55d-081c-4fa0-919e-ae7a526afd10-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.379964 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/43a4b55d-081c-4fa0-919e-ae7a526afd10-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.385828 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/43a4b55d-081c-4fa0-919e-ae7a526afd10-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.396043 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ln5bc\" (UniqueName: \"kubernetes.io/projected/43a4b55d-081c-4fa0-919e-ae7a526afd10-kube-api-access-ln5bc\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.416804 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-92e8fbfe-531d-4cd6-8b75-3bba7debc49b\") pod \"rabbitmq-server-0\" (UID: \"43a4b55d-081c-4fa0-919e-ae7a526afd10\") " pod="openstack/rabbitmq-server-0"
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.454611 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b6clv\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-kube-api-access-b6clv\") pod \"1225a271-403f-4e91-b5b2-7b8a4aaae855\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") "
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.454963 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\") pod \"1225a271-403f-4e91-b5b2-7b8a4aaae855\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") "
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.456869 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-plugins\") pod \"1225a271-403f-4e91-b5b2-7b8a4aaae855\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") "
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.456917 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-erlang-cookie\") pod \"1225a271-403f-4e91-b5b2-7b8a4aaae855\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") "
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.456939 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1225a271-403f-4e91-b5b2-7b8a4aaae855-pod-info\") pod \"1225a271-403f-4e91-b5b2-7b8a4aaae855\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") "
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.456957 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-server-conf\") pod \"1225a271-403f-4e91-b5b2-7b8a4aaae855\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") "
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.456993 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1225a271-403f-4e91-b5b2-7b8a4aaae855-erlang-cookie-secret\") pod \"1225a271-403f-4e91-b5b2-7b8a4aaae855\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") "
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.457023 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-confd\") pod \"1225a271-403f-4e91-b5b2-7b8a4aaae855\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") "
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.457037 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-plugins-conf\") pod \"1225a271-403f-4e91-b5b2-7b8a4aaae855\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") "
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.457056 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-config-data\") pod \"1225a271-403f-4e91-b5b2-7b8a4aaae855\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") "
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.457074 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-tls\") pod \"1225a271-403f-4e91-b5b2-7b8a4aaae855\" (UID: \"1225a271-403f-4e91-b5b2-7b8a4aaae855\") "
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.458716 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "1225a271-403f-4e91-b5b2-7b8a4aaae855" (UID: "1225a271-403f-4e91-b5b2-7b8a4aaae855"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.459094 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-kube-api-access-b6clv" (OuterVolumeSpecName: "kube-api-access-b6clv") pod "1225a271-403f-4e91-b5b2-7b8a4aaae855" (UID: "1225a271-403f-4e91-b5b2-7b8a4aaae855"). InnerVolumeSpecName "kube-api-access-b6clv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.459717 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "1225a271-403f-4e91-b5b2-7b8a4aaae855" (UID: "1225a271-403f-4e91-b5b2-7b8a4aaae855"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.460349 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "1225a271-403f-4e91-b5b2-7b8a4aaae855" (UID: "1225a271-403f-4e91-b5b2-7b8a4aaae855"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.460822 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "1225a271-403f-4e91-b5b2-7b8a4aaae855" (UID: "1225a271-403f-4e91-b5b2-7b8a4aaae855"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.462620 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1225a271-403f-4e91-b5b2-7b8a4aaae855-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "1225a271-403f-4e91-b5b2-7b8a4aaae855" (UID: "1225a271-403f-4e91-b5b2-7b8a4aaae855"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.470110 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/1225a271-403f-4e91-b5b2-7b8a4aaae855-pod-info" (OuterVolumeSpecName: "pod-info") pod "1225a271-403f-4e91-b5b2-7b8a4aaae855" (UID: "1225a271-403f-4e91-b5b2-7b8a4aaae855"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.474120 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07" (OuterVolumeSpecName: "persistence") pod "1225a271-403f-4e91-b5b2-7b8a4aaae855" (UID: "1225a271-403f-4e91-b5b2-7b8a4aaae855"). InnerVolumeSpecName "pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.489800 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-config-data" (OuterVolumeSpecName: "config-data") pod "1225a271-403f-4e91-b5b2-7b8a4aaae855" (UID: "1225a271-403f-4e91-b5b2-7b8a4aaae855"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.517511 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-server-conf" (OuterVolumeSpecName: "server-conf") pod "1225a271-403f-4e91-b5b2-7b8a4aaae855" (UID: "1225a271-403f-4e91-b5b2-7b8a4aaae855"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.545353 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "1225a271-403f-4e91-b5b2-7b8a4aaae855" (UID: "1225a271-403f-4e91-b5b2-7b8a4aaae855"). InnerVolumeSpecName "rabbitmq-confd".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.559415 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.559449 4932 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1225a271-403f-4e91-b5b2-7b8a4aaae855-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.559459 4932 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.559468 4932 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1225a271-403f-4e91-b5b2-7b8a4aaae855-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.559476 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.559485 4932 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.559493 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1225a271-403f-4e91-b5b2-7b8a4aaae855-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.559501 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.559511 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b6clv\" (UniqueName: \"kubernetes.io/projected/1225a271-403f-4e91-b5b2-7b8a4aaae855-kube-api-access-b6clv\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.559544 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\") on node \"crc\" " Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.559554 4932 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1225a271-403f-4e91-b5b2-7b8a4aaae855-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.577672 4932 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.577848 4932 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07") on node "crc" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.662313 4932 reconciler_common.go:293] "Volume detached for volume \"pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\") on node \"crc\" DevicePath \"\"" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.673956 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.905624 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.912460 4932 generic.go:334] "Generic (PLEG): container finished" podID="1225a271-403f-4e91-b5b2-7b8a4aaae855" containerID="43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32" exitCode=0 Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.912527 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1225a271-403f-4e91-b5b2-7b8a4aaae855","Type":"ContainerDied","Data":"43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32"} Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.912552 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1225a271-403f-4e91-b5b2-7b8a4aaae855","Type":"ContainerDied","Data":"30005499c83c0b304bd91494eef323d18d61cbc0bdce0afdeecec7e7a2c5edf0"} Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.912547 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.912644 4932 scope.go:117] "RemoveContainer" containerID="43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32" Nov 25 10:13:21 crc kubenswrapper[4932]: W1125 10:13:21.921292 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod43a4b55d_081c_4fa0_919e_ae7a526afd10.slice/crio-784d68caf14954e9c14fe64da558aae12f3330533445808b2ac12857085f41ee WatchSource:0}: Error finding container 784d68caf14954e9c14fe64da558aae12f3330533445808b2ac12857085f41ee: Status 404 returned error can't find the container with id 784d68caf14954e9c14fe64da558aae12f3330533445808b2ac12857085f41ee Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.936233 4932 scope.go:117] "RemoveContainer" containerID="9209a3226db8a7a81291d49818a394c11eb0e104aff26c6678df72ec56ab3d70" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.941934 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.947749 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.970286 4932 scope.go:117] "RemoveContainer" containerID="43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.970287 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:13:21 crc kubenswrapper[4932]: E1125 10:13:21.970784 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1225a271-403f-4e91-b5b2-7b8a4aaae855" containerName="setup-container" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.970813 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1225a271-403f-4e91-b5b2-7b8a4aaae855" containerName="setup-container" Nov 25 10:13:21 crc kubenswrapper[4932]: E1125 10:13:21.970836 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1225a271-403f-4e91-b5b2-7b8a4aaae855" containerName="rabbitmq" Nov 25 10:13:21 crc kubenswrapper[4932]: E1125 10:13:21.970832 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32\": container with ID starting with 43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32 not found: ID does not exist" containerID="43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.970872 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32"} err="failed to get container status \"43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32\": rpc error: code = NotFound desc = could not find container \"43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32\": container with ID starting with 43096e9f98afffffa506e4a84434d539a7416f04835428e70eb6925ec36b8a32 not found: ID does not exist" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.970903 4932 scope.go:117] "RemoveContainer" containerID="9209a3226db8a7a81291d49818a394c11eb0e104aff26c6678df72ec56ab3d70" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.970846 4932 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="1225a271-403f-4e91-b5b2-7b8a4aaae855" containerName="rabbitmq" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.971126 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="1225a271-403f-4e91-b5b2-7b8a4aaae855" containerName="rabbitmq" Nov 25 10:13:21 crc kubenswrapper[4932]: E1125 10:13:21.971212 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9209a3226db8a7a81291d49818a394c11eb0e104aff26c6678df72ec56ab3d70\": container with ID starting with 9209a3226db8a7a81291d49818a394c11eb0e104aff26c6678df72ec56ab3d70 not found: ID does not exist" containerID="9209a3226db8a7a81291d49818a394c11eb0e104aff26c6678df72ec56ab3d70" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.971240 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9209a3226db8a7a81291d49818a394c11eb0e104aff26c6678df72ec56ab3d70"} err="failed to get container status \"9209a3226db8a7a81291d49818a394c11eb0e104aff26c6678df72ec56ab3d70\": rpc error: code = NotFound desc = could not find container \"9209a3226db8a7a81291d49818a394c11eb0e104aff26c6678df72ec56ab3d70\": container with ID starting with 9209a3226db8a7a81291d49818a394c11eb0e104aff26c6678df72ec56ab3d70 not found: ID does not exist" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.972077 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:21 crc kubenswrapper[4932]: I1125 10:13:21.975546 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:21.977409 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-rm8hg" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:21.977939 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:21.978330 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:21.978482 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:21.978746 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:21.978916 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:21.985489 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.168942 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ae85197a-d847-4051-b643-a65b7757af92-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.168985 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/ae85197a-d847-4051-b643-a65b7757af92-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.169015 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nxnt\" (UniqueName: \"kubernetes.io/projected/ae85197a-d847-4051-b643-a65b7757af92-kube-api-access-4nxnt\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.169077 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ae85197a-d847-4051-b643-a65b7757af92-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.169135 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ae85197a-d847-4051-b643-a65b7757af92-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.169170 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ae85197a-d847-4051-b643-a65b7757af92-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.169260 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.169285 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ae85197a-d847-4051-b643-a65b7757af92-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.169301 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ae85197a-d847-4051-b643-a65b7757af92-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.169320 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ae85197a-d847-4051-b643-a65b7757af92-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.169339 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ae85197a-d847-4051-b643-a65b7757af92-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.270376 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ae85197a-d847-4051-b643-a65b7757af92-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.270432 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ae85197a-d847-4051-b643-a65b7757af92-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.270463 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nxnt\" (UniqueName: \"kubernetes.io/projected/ae85197a-d847-4051-b643-a65b7757af92-kube-api-access-4nxnt\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.270523 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ae85197a-d847-4051-b643-a65b7757af92-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.270548 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ae85197a-d847-4051-b643-a65b7757af92-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.270579 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ae85197a-d847-4051-b643-a65b7757af92-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.270616 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.270643 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ae85197a-d847-4051-b643-a65b7757af92-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.270667 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/ae85197a-d847-4051-b643-a65b7757af92-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.270691 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ae85197a-d847-4051-b643-a65b7757af92-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.270713 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ae85197a-d847-4051-b643-a65b7757af92-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.271868 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/ae85197a-d847-4051-b643-a65b7757af92-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.272399 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/ae85197a-d847-4051-b643-a65b7757af92-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.272862 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ae85197a-d847-4051-b643-a65b7757af92-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.272911 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/ae85197a-d847-4051-b643-a65b7757af92-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.273482 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/ae85197a-d847-4051-b643-a65b7757af92-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.274559 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/ae85197a-d847-4051-b643-a65b7757af92-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.275276 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/ae85197a-d847-4051-b643-a65b7757af92-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.275616 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.275643 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6ba10319f446a003ed4fcd0002ff9f0efb18640d00179771f05b30d41653cbb4/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.275801 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/ae85197a-d847-4051-b643-a65b7757af92-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.277079 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/ae85197a-d847-4051-b643-a65b7757af92-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.289144 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nxnt\" (UniqueName: \"kubernetes.io/projected/ae85197a-d847-4051-b643-a65b7757af92-kube-api-access-4nxnt\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.322745 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1f17078b-7ac6-47fe-9c92-98d7991e1c07\") pod \"rabbitmq-cell1-server-0\" (UID: \"ae85197a-d847-4051-b643-a65b7757af92\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.355064 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.619155 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1225a271-403f-4e91-b5b2-7b8a4aaae855" path="/var/lib/kubelet/pods/1225a271-403f-4e91-b5b2-7b8a4aaae855/volumes" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.620221 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="798d2263-61ba-4aa7-ba96-9971ee1080a8" path="/var/lib/kubelet/pods/798d2263-61ba-4aa7-ba96-9971ee1080a8/volumes" Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.793660 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 10:13:22 crc kubenswrapper[4932]: W1125 10:13:22.801989 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae85197a_d847_4051_b643_a65b7757af92.slice/crio-2dae4672fbba2c1030d0c2ace4b6357576387bc03b9ee39084d10aa4533c308a WatchSource:0}: Error finding container 2dae4672fbba2c1030d0c2ace4b6357576387bc03b9ee39084d10aa4533c308a: Status 404 returned error can't find the container with id 2dae4672fbba2c1030d0c2ace4b6357576387bc03b9ee39084d10aa4533c308a Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.929260 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"43a4b55d-081c-4fa0-919e-ae7a526afd10","Type":"ContainerStarted","Data":"784d68caf14954e9c14fe64da558aae12f3330533445808b2ac12857085f41ee"} Nov 25 10:13:22 crc kubenswrapper[4932]: I1125 10:13:22.930542 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ae85197a-d847-4051-b643-a65b7757af92","Type":"ContainerStarted","Data":"2dae4672fbba2c1030d0c2ace4b6357576387bc03b9ee39084d10aa4533c308a"} Nov 25 10:13:23 crc kubenswrapper[4932]: I1125 10:13:23.944427 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"43a4b55d-081c-4fa0-919e-ae7a526afd10","Type":"ContainerStarted","Data":"11358f041ccddead1ae14f69e01b431833320f54836d6bbdd279381393c45594"} Nov 25 10:13:24 crc kubenswrapper[4932]: I1125 10:13:24.958050 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ae85197a-d847-4051-b643-a65b7757af92","Type":"ContainerStarted","Data":"a2dd7f35c6eee883f27f2588191ad5e70d4429b912a304e14049d465c4836770"} Nov 25 10:13:28 crc kubenswrapper[4932]: I1125 10:13:28.606060 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" Nov 25 10:13:28 crc kubenswrapper[4932]: E1125 10:13:28.606747 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:13:40 crc kubenswrapper[4932]: I1125 10:13:40.610491 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" Nov 25 10:13:40 crc kubenswrapper[4932]: E1125 10:13:40.611306 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.037103 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tvjh6"] Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.039597 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.084914 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tvjh6"] Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.144779 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhv69\" (UniqueName: \"kubernetes.io/projected/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-kube-api-access-qhv69\") pod \"certified-operators-tvjh6\" (UID: \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\") " pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.144958 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-catalog-content\") pod \"certified-operators-tvjh6\" (UID: \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\") " pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.145202 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-utilities\") pod \"certified-operators-tvjh6\" (UID: \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\") " pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.246501 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-catalog-content\") pod \"certified-operators-tvjh6\" (UID: \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\") " pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.246597 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-utilities\") pod \"certified-operators-tvjh6\" (UID: \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\") " pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.246690 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhv69\" (UniqueName: \"kubernetes.io/projected/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-kube-api-access-qhv69\") pod \"certified-operators-tvjh6\" (UID: \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\") " pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.247115 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-catalog-content\") pod \"certified-operators-tvjh6\" (UID: 
\"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\") " pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.247176 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-utilities\") pod \"certified-operators-tvjh6\" (UID: \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\") " pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.274155 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhv69\" (UniqueName: \"kubernetes.io/projected/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-kube-api-access-qhv69\") pod \"certified-operators-tvjh6\" (UID: \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\") " pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.360653 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:13:52 crc kubenswrapper[4932]: I1125 10:13:52.848808 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tvjh6"] Nov 25 10:13:53 crc kubenswrapper[4932]: I1125 10:13:53.193434 4932 generic.go:334] "Generic (PLEG): container finished" podID="bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" containerID="367e1a3db9acc92a4d60550f61262c33be8dc818cb4520f9412626b46dad698f" exitCode=0 Nov 25 10:13:53 crc kubenswrapper[4932]: I1125 10:13:53.193533 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvjh6" event={"ID":"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1","Type":"ContainerDied","Data":"367e1a3db9acc92a4d60550f61262c33be8dc818cb4520f9412626b46dad698f"} Nov 25 10:13:53 crc kubenswrapper[4932]: I1125 10:13:53.193850 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvjh6" event={"ID":"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1","Type":"ContainerStarted","Data":"a5ad4ae815b364fa548c9a361ab8ba7bd58aeeebeac8b17bc075559c549a050a"} Nov 25 10:13:54 crc kubenswrapper[4932]: I1125 10:13:54.205025 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvjh6" event={"ID":"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1","Type":"ContainerStarted","Data":"bc4f27e7b2e71ce2356c6d1fedf053a302a60430d794fe9fd98a38c44d206983"} Nov 25 10:13:55 crc kubenswrapper[4932]: I1125 10:13:55.213036 4932 generic.go:334] "Generic (PLEG): container finished" podID="bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" containerID="bc4f27e7b2e71ce2356c6d1fedf053a302a60430d794fe9fd98a38c44d206983" exitCode=0 Nov 25 10:13:55 crc kubenswrapper[4932]: I1125 10:13:55.213101 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvjh6" event={"ID":"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1","Type":"ContainerDied","Data":"bc4f27e7b2e71ce2356c6d1fedf053a302a60430d794fe9fd98a38c44d206983"} Nov 25 10:13:55 crc kubenswrapper[4932]: I1125 10:13:55.606651 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" Nov 25 10:13:55 crc kubenswrapper[4932]: E1125 10:13:55.606989 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:13:56 crc kubenswrapper[4932]: I1125 10:13:56.227238 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvjh6" event={"ID":"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1","Type":"ContainerStarted","Data":"f822390914a9e04640a4f4f60758462ba1e63d9361607f08c4b7504cb17d2142"} Nov 25 10:13:56 crc kubenswrapper[4932]: I1125 10:13:56.229420 4932 generic.go:334] "Generic (PLEG): container finished" podID="43a4b55d-081c-4fa0-919e-ae7a526afd10" containerID="11358f041ccddead1ae14f69e01b431833320f54836d6bbdd279381393c45594" exitCode=0 Nov 25 10:13:56 crc kubenswrapper[4932]: I1125 10:13:56.229460 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"43a4b55d-081c-4fa0-919e-ae7a526afd10","Type":"ContainerDied","Data":"11358f041ccddead1ae14f69e01b431833320f54836d6bbdd279381393c45594"} Nov 25 10:13:56 crc kubenswrapper[4932]: I1125 10:13:56.252290 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tvjh6" podStartSLOduration=1.8397518430000002 podStartE2EDuration="4.252266895s" podCreationTimestamp="2025-11-25 10:13:52 +0000 UTC" firstStartedPulling="2025-11-25 10:13:53.195140092 +0000 UTC m=+5093.321169675" lastFinishedPulling="2025-11-25 10:13:55.607655154 +0000 UTC m=+5095.733684727" observedRunningTime="2025-11-25 10:13:56.246456479 +0000 UTC m=+5096.372486062" watchObservedRunningTime="2025-11-25 10:13:56.252266895 +0000 UTC m=+5096.378296468" Nov 25 10:13:57 crc kubenswrapper[4932]: I1125 10:13:57.238094 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"43a4b55d-081c-4fa0-919e-ae7a526afd10","Type":"ContainerStarted","Data":"9900028281ecd7ae31eb81553a0808ba5c7b05c1708a181b3aa1e4f03ef07f3f"} Nov 25 10:13:57 crc kubenswrapper[4932]: I1125 10:13:57.238604 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 10:13:57 crc kubenswrapper[4932]: I1125 10:13:57.262374 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.26235407 podStartE2EDuration="36.26235407s" podCreationTimestamp="2025-11-25 10:13:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:13:57.260863167 +0000 UTC m=+5097.386892740" watchObservedRunningTime="2025-11-25 10:13:57.26235407 +0000 UTC m=+5097.388383633" Nov 25 10:13:58 crc kubenswrapper[4932]: I1125 10:13:58.249152 4932 generic.go:334] "Generic (PLEG): container finished" podID="ae85197a-d847-4051-b643-a65b7757af92" containerID="a2dd7f35c6eee883f27f2588191ad5e70d4429b912a304e14049d465c4836770" exitCode=0 Nov 25 10:13:58 crc kubenswrapper[4932]: I1125 10:13:58.250424 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"ae85197a-d847-4051-b643-a65b7757af92","Type":"ContainerDied","Data":"a2dd7f35c6eee883f27f2588191ad5e70d4429b912a304e14049d465c4836770"} Nov 25 10:13:59 crc kubenswrapper[4932]: I1125 10:13:59.258865 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"ae85197a-d847-4051-b643-a65b7757af92","Type":"ContainerStarted","Data":"6fbb9c4b34f1b722ad29429ec09f7abb9c56d5f35de161b9f9199b7270075bc5"} Nov 25 10:13:59 crc kubenswrapper[4932]: I1125 10:13:59.259849 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:13:59 crc kubenswrapper[4932]: I1125 10:13:59.287903 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.287881472 podStartE2EDuration="38.287881472s" podCreationTimestamp="2025-11-25 10:13:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:13:59.282138367 +0000 UTC m=+5099.408167940" watchObservedRunningTime="2025-11-25 10:13:59.287881472 +0000 UTC m=+5099.413911035" Nov 25 10:14:02 crc kubenswrapper[4932]: I1125 10:14:02.367911 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:14:02 crc kubenswrapper[4932]: I1125 10:14:02.369433 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:14:02 crc kubenswrapper[4932]: I1125 10:14:02.420485 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:14:03 crc kubenswrapper[4932]: I1125 10:14:03.335973 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:14:03 crc kubenswrapper[4932]: I1125 10:14:03.414864 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tvjh6"] Nov 25 10:14:05 crc kubenswrapper[4932]: I1125 10:14:05.301900 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tvjh6" podUID="bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" containerName="registry-server" containerID="cri-o://f822390914a9e04640a4f4f60758462ba1e63d9361607f08c4b7504cb17d2142" gracePeriod=2 Nov 25 10:14:06 crc kubenswrapper[4932]: I1125 10:14:06.315355 4932 generic.go:334] "Generic (PLEG): container finished" podID="bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" containerID="f822390914a9e04640a4f4f60758462ba1e63d9361607f08c4b7504cb17d2142" exitCode=0 Nov 25 10:14:06 crc kubenswrapper[4932]: I1125 10:14:06.315597 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvjh6" event={"ID":"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1","Type":"ContainerDied","Data":"f822390914a9e04640a4f4f60758462ba1e63d9361607f08c4b7504cb17d2142"} Nov 25 10:14:06 crc kubenswrapper[4932]: I1125 10:14:06.605819 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" Nov 25 10:14:06 crc kubenswrapper[4932]: E1125 10:14:06.606127 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.073865 4932 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.198364 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-utilities\") pod \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\" (UID: \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\") " Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.198534 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-catalog-content\") pod \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\" (UID: \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\") " Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.198569 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qhv69\" (UniqueName: \"kubernetes.io/projected/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-kube-api-access-qhv69\") pod \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\" (UID: \"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1\") " Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.199591 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-utilities" (OuterVolumeSpecName: "utilities") pod "bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" (UID: "bd5a2c09-65e0-48cc-abb6-4c0843cecbf1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.204651 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-kube-api-access-qhv69" (OuterVolumeSpecName: "kube-api-access-qhv69") pod "bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" (UID: "bd5a2c09-65e0-48cc-abb6-4c0843cecbf1"). InnerVolumeSpecName "kube-api-access-qhv69". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.243457 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" (UID: "bd5a2c09-65e0-48cc-abb6-4c0843cecbf1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.300681 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.300737 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qhv69\" (UniqueName: \"kubernetes.io/projected/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-kube-api-access-qhv69\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.300756 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.329327 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tvjh6" event={"ID":"bd5a2c09-65e0-48cc-abb6-4c0843cecbf1","Type":"ContainerDied","Data":"a5ad4ae815b364fa548c9a361ab8ba7bd58aeeebeac8b17bc075559c549a050a"} Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.329381 4932 scope.go:117] "RemoveContainer" containerID="f822390914a9e04640a4f4f60758462ba1e63d9361607f08c4b7504cb17d2142" Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.329458 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tvjh6" Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.411746 4932 scope.go:117] "RemoveContainer" containerID="bc4f27e7b2e71ce2356c6d1fedf053a302a60430d794fe9fd98a38c44d206983" Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.425367 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tvjh6"] Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.427721 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tvjh6"] Nov 25 10:14:07 crc kubenswrapper[4932]: I1125 10:14:07.478151 4932 scope.go:117] "RemoveContainer" containerID="367e1a3db9acc92a4d60550f61262c33be8dc818cb4520f9412626b46dad698f" Nov 25 10:14:08 crc kubenswrapper[4932]: I1125 10:14:08.619382 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" path="/var/lib/kubelet/pods/bd5a2c09-65e0-48cc-abb6-4c0843cecbf1/volumes" Nov 25 10:14:11 crc kubenswrapper[4932]: I1125 10:14:11.677475 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 10:14:12 crc kubenswrapper[4932]: I1125 10:14:12.359717 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 10:14:16 crc kubenswrapper[4932]: I1125 10:14:16.726302 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1-default"] Nov 25 10:14:16 crc kubenswrapper[4932]: E1125 10:14:16.728505 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" containerName="registry-server" Nov 25 10:14:16 crc kubenswrapper[4932]: I1125 10:14:16.728672 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" containerName="registry-server" Nov 25 10:14:16 crc kubenswrapper[4932]: E1125 10:14:16.728773 4932 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" containerName="extract-content" Nov 25 10:14:16 crc kubenswrapper[4932]: I1125 10:14:16.728844 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" containerName="extract-content" Nov 25 10:14:16 crc kubenswrapper[4932]: E1125 10:14:16.729034 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" containerName="extract-utilities" Nov 25 10:14:16 crc kubenswrapper[4932]: I1125 10:14:16.729149 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" containerName="extract-utilities" Nov 25 10:14:16 crc kubenswrapper[4932]: I1125 10:14:16.729673 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd5a2c09-65e0-48cc-abb6-4c0843cecbf1" containerName="registry-server" Nov 25 10:14:16 crc kubenswrapper[4932]: I1125 10:14:16.730396 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 25 10:14:16 crc kubenswrapper[4932]: I1125 10:14:16.734050 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 25 10:14:16 crc kubenswrapper[4932]: I1125 10:14:16.735573 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-tmn25" Nov 25 10:14:16 crc kubenswrapper[4932]: I1125 10:14:16.862392 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9pjc\" (UniqueName: \"kubernetes.io/projected/8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89-kube-api-access-z9pjc\") pod \"mariadb-client-1-default\" (UID: \"8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89\") " pod="openstack/mariadb-client-1-default" Nov 25 10:14:16 crc kubenswrapper[4932]: I1125 10:14:16.964058 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9pjc\" (UniqueName: \"kubernetes.io/projected/8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89-kube-api-access-z9pjc\") pod \"mariadb-client-1-default\" (UID: \"8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89\") " pod="openstack/mariadb-client-1-default" Nov 25 10:14:16 crc kubenswrapper[4932]: I1125 10:14:16.988956 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9pjc\" (UniqueName: \"kubernetes.io/projected/8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89-kube-api-access-z9pjc\") pod \"mariadb-client-1-default\" (UID: \"8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89\") " pod="openstack/mariadb-client-1-default" Nov 25 10:14:17 crc kubenswrapper[4932]: I1125 10:14:17.063819 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 25 10:14:17 crc kubenswrapper[4932]: I1125 10:14:17.603632 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 25 10:14:17 crc kubenswrapper[4932]: W1125 10:14:17.610528 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8a54ea06_b01f_4fe8_a6fc_c5de46e7bb89.slice/crio-ce0139c31008328ef61a6c22e0c432a3c8780696f4d86bf17b468a5d9ab2850d WatchSource:0}: Error finding container ce0139c31008328ef61a6c22e0c432a3c8780696f4d86bf17b468a5d9ab2850d: Status 404 returned error can't find the container with id ce0139c31008328ef61a6c22e0c432a3c8780696f4d86bf17b468a5d9ab2850d Nov 25 10:14:18 crc kubenswrapper[4932]: I1125 10:14:18.432614 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89","Type":"ContainerStarted","Data":"ce0139c31008328ef61a6c22e0c432a3c8780696f4d86bf17b468a5d9ab2850d"} Nov 25 10:14:19 crc kubenswrapper[4932]: I1125 10:14:19.442980 4932 generic.go:334] "Generic (PLEG): container finished" podID="8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89" containerID="73b50a6a63be3c198b322aeabfef26773e29f835dc0c2cc326fe9be4b35513eb" exitCode=0 Nov 25 10:14:19 crc kubenswrapper[4932]: I1125 10:14:19.443077 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89","Type":"ContainerDied","Data":"73b50a6a63be3c198b322aeabfef26773e29f835dc0c2cc326fe9be4b35513eb"} Nov 25 10:14:20 crc kubenswrapper[4932]: I1125 10:14:20.612326 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" Nov 25 10:14:20 crc kubenswrapper[4932]: E1125 10:14:20.612624 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:14:20 crc kubenswrapper[4932]: I1125 10:14:20.810436 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 25 10:14:20 crc kubenswrapper[4932]: I1125 10:14:20.840357 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1-default_8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89/mariadb-client-1-default/0.log" Nov 25 10:14:20 crc kubenswrapper[4932]: I1125 10:14:20.870778 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 25 10:14:20 crc kubenswrapper[4932]: I1125 10:14:20.876361 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 25 10:14:20 crc kubenswrapper[4932]: I1125 10:14:20.928780 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9pjc\" (UniqueName: \"kubernetes.io/projected/8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89-kube-api-access-z9pjc\") pod \"8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89\" (UID: \"8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89\") " Nov 25 10:14:20 crc kubenswrapper[4932]: I1125 10:14:20.935843 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89-kube-api-access-z9pjc" (OuterVolumeSpecName: "kube-api-access-z9pjc") pod "8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89" (UID: "8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89"). InnerVolumeSpecName "kube-api-access-z9pjc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:21 crc kubenswrapper[4932]: I1125 10:14:21.030606 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9pjc\" (UniqueName: \"kubernetes.io/projected/8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89-kube-api-access-z9pjc\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:21 crc kubenswrapper[4932]: I1125 10:14:21.419326 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2-default"] Nov 25 10:14:21 crc kubenswrapper[4932]: E1125 10:14:21.419900 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89" containerName="mariadb-client-1-default" Nov 25 10:14:21 crc kubenswrapper[4932]: I1125 10:14:21.419937 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89" containerName="mariadb-client-1-default" Nov 25 10:14:21 crc kubenswrapper[4932]: I1125 10:14:21.420330 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89" containerName="mariadb-client-1-default" Nov 25 10:14:21 crc kubenswrapper[4932]: I1125 10:14:21.421256 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 25 10:14:21 crc kubenswrapper[4932]: I1125 10:14:21.449961 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 25 10:14:21 crc kubenswrapper[4932]: I1125 10:14:21.459857 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce0139c31008328ef61a6c22e0c432a3c8780696f4d86bf17b468a5d9ab2850d" Nov 25 10:14:21 crc kubenswrapper[4932]: I1125 10:14:21.459920 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 25 10:14:21 crc kubenswrapper[4932]: I1125 10:14:21.539603 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4r45\" (UniqueName: \"kubernetes.io/projected/2dc7880f-60aa-4063-bbf5-46048a21eea2-kube-api-access-z4r45\") pod \"mariadb-client-2-default\" (UID: \"2dc7880f-60aa-4063-bbf5-46048a21eea2\") " pod="openstack/mariadb-client-2-default" Nov 25 10:14:21 crc kubenswrapper[4932]: I1125 10:14:21.641351 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4r45\" (UniqueName: \"kubernetes.io/projected/2dc7880f-60aa-4063-bbf5-46048a21eea2-kube-api-access-z4r45\") pod \"mariadb-client-2-default\" (UID: \"2dc7880f-60aa-4063-bbf5-46048a21eea2\") " pod="openstack/mariadb-client-2-default" Nov 25 10:14:21 crc kubenswrapper[4932]: I1125 10:14:21.673792 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4r45\" (UniqueName: \"kubernetes.io/projected/2dc7880f-60aa-4063-bbf5-46048a21eea2-kube-api-access-z4r45\") pod \"mariadb-client-2-default\" (UID: \"2dc7880f-60aa-4063-bbf5-46048a21eea2\") " pod="openstack/mariadb-client-2-default" Nov 25 10:14:21 crc kubenswrapper[4932]: I1125 10:14:21.738861 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 25 10:14:22 crc kubenswrapper[4932]: I1125 10:14:22.239133 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 25 10:14:22 crc kubenswrapper[4932]: W1125 10:14:22.241612 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2dc7880f_60aa_4063_bbf5_46048a21eea2.slice/crio-ee27eabc33eee7a5de4bfdb44a27bdc465cf052296831f3cbd04f2655c322c1d WatchSource:0}: Error finding container ee27eabc33eee7a5de4bfdb44a27bdc465cf052296831f3cbd04f2655c322c1d: Status 404 returned error can't find the container with id ee27eabc33eee7a5de4bfdb44a27bdc465cf052296831f3cbd04f2655c322c1d Nov 25 10:14:22 crc kubenswrapper[4932]: I1125 10:14:22.466469 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"2dc7880f-60aa-4063-bbf5-46048a21eea2","Type":"ContainerStarted","Data":"ee27eabc33eee7a5de4bfdb44a27bdc465cf052296831f3cbd04f2655c322c1d"} Nov 25 10:14:22 crc kubenswrapper[4932]: I1125 10:14:22.617485 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89" path="/var/lib/kubelet/pods/8a54ea06-b01f-4fe8-a6fc-c5de46e7bb89/volumes" Nov 25 10:14:23 crc kubenswrapper[4932]: I1125 10:14:23.480740 4932 generic.go:334] "Generic (PLEG): container finished" podID="2dc7880f-60aa-4063-bbf5-46048a21eea2" containerID="49501eb9e126afbb391367340e4ba9d6736fbaa927653fde9494c29f8c97bb97" exitCode=1 Nov 25 10:14:23 crc kubenswrapper[4932]: I1125 10:14:23.480853 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"2dc7880f-60aa-4063-bbf5-46048a21eea2","Type":"ContainerDied","Data":"49501eb9e126afbb391367340e4ba9d6736fbaa927653fde9494c29f8c97bb97"} Nov 25 10:14:24 crc kubenswrapper[4932]: I1125 10:14:24.907394 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 25 10:14:24 crc kubenswrapper[4932]: I1125 10:14:24.928780 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2-default_2dc7880f-60aa-4063-bbf5-46048a21eea2/mariadb-client-2-default/0.log" Nov 25 10:14:24 crc kubenswrapper[4932]: I1125 10:14:24.958951 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 25 10:14:24 crc kubenswrapper[4932]: I1125 10:14:24.964367 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.097329 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4r45\" (UniqueName: \"kubernetes.io/projected/2dc7880f-60aa-4063-bbf5-46048a21eea2-kube-api-access-z4r45\") pod \"2dc7880f-60aa-4063-bbf5-46048a21eea2\" (UID: \"2dc7880f-60aa-4063-bbf5-46048a21eea2\") " Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.104026 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dc7880f-60aa-4063-bbf5-46048a21eea2-kube-api-access-z4r45" (OuterVolumeSpecName: "kube-api-access-z4r45") pod "2dc7880f-60aa-4063-bbf5-46048a21eea2" (UID: "2dc7880f-60aa-4063-bbf5-46048a21eea2"). InnerVolumeSpecName "kube-api-access-z4r45". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.199353 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4r45\" (UniqueName: \"kubernetes.io/projected/2dc7880f-60aa-4063-bbf5-46048a21eea2-kube-api-access-z4r45\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.416477 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1"] Nov 25 10:14:25 crc kubenswrapper[4932]: E1125 10:14:25.417018 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dc7880f-60aa-4063-bbf5-46048a21eea2" containerName="mariadb-client-2-default" Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.417048 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dc7880f-60aa-4063-bbf5-46048a21eea2" containerName="mariadb-client-2-default" Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.417334 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dc7880f-60aa-4063-bbf5-46048a21eea2" containerName="mariadb-client-2-default" Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.418094 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.436092 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.498671 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee27eabc33eee7a5de4bfdb44a27bdc465cf052296831f3cbd04f2655c322c1d" Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.498748 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.605302 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cqbr\" (UniqueName: \"kubernetes.io/projected/535b1e59-24c6-493d-b95e-3ebbeb3b0017-kube-api-access-7cqbr\") pod \"mariadb-client-1\" (UID: \"535b1e59-24c6-493d-b95e-3ebbeb3b0017\") " pod="openstack/mariadb-client-1" Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.707620 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cqbr\" (UniqueName: \"kubernetes.io/projected/535b1e59-24c6-493d-b95e-3ebbeb3b0017-kube-api-access-7cqbr\") pod \"mariadb-client-1\" (UID: \"535b1e59-24c6-493d-b95e-3ebbeb3b0017\") " pod="openstack/mariadb-client-1" Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.726329 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cqbr\" (UniqueName: \"kubernetes.io/projected/535b1e59-24c6-493d-b95e-3ebbeb3b0017-kube-api-access-7cqbr\") pod \"mariadb-client-1\" (UID: \"535b1e59-24c6-493d-b95e-3ebbeb3b0017\") " pod="openstack/mariadb-client-1" Nov 25 10:14:25 crc kubenswrapper[4932]: I1125 10:14:25.751673 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 25 10:14:26 crc kubenswrapper[4932]: I1125 10:14:26.229004 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Nov 25 10:14:26 crc kubenswrapper[4932]: I1125 10:14:26.509597 4932 generic.go:334] "Generic (PLEG): container finished" podID="535b1e59-24c6-493d-b95e-3ebbeb3b0017" containerID="a6dbe64ab2367b875415405c350f7c4d61e02b20f1a65bb56f16be92f5e6784d" exitCode=0 Nov 25 10:14:26 crc kubenswrapper[4932]: I1125 10:14:26.509740 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"535b1e59-24c6-493d-b95e-3ebbeb3b0017","Type":"ContainerDied","Data":"a6dbe64ab2367b875415405c350f7c4d61e02b20f1a65bb56f16be92f5e6784d"} Nov 25 10:14:26 crc kubenswrapper[4932]: I1125 10:14:26.510010 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"535b1e59-24c6-493d-b95e-3ebbeb3b0017","Type":"ContainerStarted","Data":"3fed9f5978d7184bbb02b6ae9e00cd1c607365caa9b5c9b18cb30fdd81e9f552"} Nov 25 10:14:26 crc kubenswrapper[4932]: I1125 10:14:26.623683 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dc7880f-60aa-4063-bbf5-46048a21eea2" path="/var/lib/kubelet/pods/2dc7880f-60aa-4063-bbf5-46048a21eea2/volumes" Nov 25 10:14:27 crc kubenswrapper[4932]: I1125 10:14:27.935621 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Nov 25 10:14:27 crc kubenswrapper[4932]: I1125 10:14:27.960143 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1_535b1e59-24c6-493d-b95e-3ebbeb3b0017/mariadb-client-1/0.log" Nov 25 10:14:27 crc kubenswrapper[4932]: I1125 10:14:27.985618 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1"] Nov 25 10:14:27 crc kubenswrapper[4932]: I1125 10:14:27.988593 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1"] Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.044264 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cqbr\" (UniqueName: \"kubernetes.io/projected/535b1e59-24c6-493d-b95e-3ebbeb3b0017-kube-api-access-7cqbr\") pod \"535b1e59-24c6-493d-b95e-3ebbeb3b0017\" (UID: \"535b1e59-24c6-493d-b95e-3ebbeb3b0017\") " Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.051335 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/535b1e59-24c6-493d-b95e-3ebbeb3b0017-kube-api-access-7cqbr" (OuterVolumeSpecName: "kube-api-access-7cqbr") pod "535b1e59-24c6-493d-b95e-3ebbeb3b0017" (UID: "535b1e59-24c6-493d-b95e-3ebbeb3b0017"). InnerVolumeSpecName "kube-api-access-7cqbr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.146327 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cqbr\" (UniqueName: \"kubernetes.io/projected/535b1e59-24c6-493d-b95e-3ebbeb3b0017-kube-api-access-7cqbr\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.371910 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-4-default"] Nov 25 10:14:28 crc kubenswrapper[4932]: E1125 10:14:28.372331 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="535b1e59-24c6-493d-b95e-3ebbeb3b0017" containerName="mariadb-client-1" Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.372345 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="535b1e59-24c6-493d-b95e-3ebbeb3b0017" containerName="mariadb-client-1" Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.372558 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="535b1e59-24c6-493d-b95e-3ebbeb3b0017" containerName="mariadb-client-1" Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.373266 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.378742 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.526213 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3fed9f5978d7184bbb02b6ae9e00cd1c607365caa9b5c9b18cb30fdd81e9f552" Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.526824 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.551865 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkbmg\" (UniqueName: \"kubernetes.io/projected/c8e826c0-1bf7-4a09-bbee-ce773f57e343-kube-api-access-hkbmg\") pod \"mariadb-client-4-default\" (UID: \"c8e826c0-1bf7-4a09-bbee-ce773f57e343\") " pod="openstack/mariadb-client-4-default" Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.619231 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="535b1e59-24c6-493d-b95e-3ebbeb3b0017" path="/var/lib/kubelet/pods/535b1e59-24c6-493d-b95e-3ebbeb3b0017/volumes" Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.653215 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkbmg\" (UniqueName: \"kubernetes.io/projected/c8e826c0-1bf7-4a09-bbee-ce773f57e343-kube-api-access-hkbmg\") pod \"mariadb-client-4-default\" (UID: \"c8e826c0-1bf7-4a09-bbee-ce773f57e343\") " pod="openstack/mariadb-client-4-default" Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.678916 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkbmg\" (UniqueName: \"kubernetes.io/projected/c8e826c0-1bf7-4a09-bbee-ce773f57e343-kube-api-access-hkbmg\") pod \"mariadb-client-4-default\" (UID: \"c8e826c0-1bf7-4a09-bbee-ce773f57e343\") " pod="openstack/mariadb-client-4-default" Nov 25 10:14:28 crc kubenswrapper[4932]: I1125 10:14:28.693992 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 25 10:14:29 crc kubenswrapper[4932]: I1125 10:14:29.179343 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 25 10:14:29 crc kubenswrapper[4932]: I1125 10:14:29.537525 4932 generic.go:334] "Generic (PLEG): container finished" podID="c8e826c0-1bf7-4a09-bbee-ce773f57e343" containerID="56fb9f6caef65305697bf299aac33393b254c31a2939d30daadd6fd1afcfab86" exitCode=0 Nov 25 10:14:29 crc kubenswrapper[4932]: I1125 10:14:29.537781 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"c8e826c0-1bf7-4a09-bbee-ce773f57e343","Type":"ContainerDied","Data":"56fb9f6caef65305697bf299aac33393b254c31a2939d30daadd6fd1afcfab86"} Nov 25 10:14:29 crc kubenswrapper[4932]: I1125 10:14:29.538039 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"c8e826c0-1bf7-4a09-bbee-ce773f57e343","Type":"ContainerStarted","Data":"b7fac90e93db0e1bf37c7e72c515efeb225baa3d029e03f3c9cb05e2cddf4e83"} Nov 25 10:14:30 crc kubenswrapper[4932]: I1125 10:14:30.887670 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 25 10:14:30 crc kubenswrapper[4932]: I1125 10:14:30.904990 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-4-default_c8e826c0-1bf7-4a09-bbee-ce773f57e343/mariadb-client-4-default/0.log" Nov 25 10:14:30 crc kubenswrapper[4932]: I1125 10:14:30.926549 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 25 10:14:30 crc kubenswrapper[4932]: I1125 10:14:30.931745 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 25 10:14:30 crc kubenswrapper[4932]: I1125 10:14:30.990540 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkbmg\" (UniqueName: \"kubernetes.io/projected/c8e826c0-1bf7-4a09-bbee-ce773f57e343-kube-api-access-hkbmg\") pod \"c8e826c0-1bf7-4a09-bbee-ce773f57e343\" (UID: \"c8e826c0-1bf7-4a09-bbee-ce773f57e343\") " Nov 25 10:14:31 crc kubenswrapper[4932]: I1125 10:14:31.003425 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8e826c0-1bf7-4a09-bbee-ce773f57e343-kube-api-access-hkbmg" (OuterVolumeSpecName: "kube-api-access-hkbmg") pod "c8e826c0-1bf7-4a09-bbee-ce773f57e343" (UID: "c8e826c0-1bf7-4a09-bbee-ce773f57e343"). InnerVolumeSpecName "kube-api-access-hkbmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:31 crc kubenswrapper[4932]: I1125 10:14:31.092583 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkbmg\" (UniqueName: \"kubernetes.io/projected/c8e826c0-1bf7-4a09-bbee-ce773f57e343-kube-api-access-hkbmg\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:31 crc kubenswrapper[4932]: I1125 10:14:31.555760 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7fac90e93db0e1bf37c7e72c515efeb225baa3d029e03f3c9cb05e2cddf4e83" Nov 25 10:14:31 crc kubenswrapper[4932]: I1125 10:14:31.555856 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 25 10:14:32 crc kubenswrapper[4932]: I1125 10:14:32.606633 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" Nov 25 10:14:32 crc kubenswrapper[4932]: E1125 10:14:32.607128 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:14:32 crc kubenswrapper[4932]: I1125 10:14:32.625308 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8e826c0-1bf7-4a09-bbee-ce773f57e343" path="/var/lib/kubelet/pods/c8e826c0-1bf7-4a09-bbee-ce773f57e343/volumes" Nov 25 10:14:35 crc kubenswrapper[4932]: I1125 10:14:35.331331 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-5-default"] Nov 25 10:14:35 crc kubenswrapper[4932]: E1125 10:14:35.332485 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8e826c0-1bf7-4a09-bbee-ce773f57e343" containerName="mariadb-client-4-default" Nov 25 10:14:35 crc kubenswrapper[4932]: I1125 10:14:35.332507 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8e826c0-1bf7-4a09-bbee-ce773f57e343" containerName="mariadb-client-4-default" Nov 25 10:14:35 crc kubenswrapper[4932]: I1125 10:14:35.332746 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8e826c0-1bf7-4a09-bbee-ce773f57e343" containerName="mariadb-client-4-default" Nov 25 10:14:35 crc kubenswrapper[4932]: I1125 10:14:35.333442 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 25 10:14:35 crc kubenswrapper[4932]: I1125 10:14:35.339620 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 25 10:14:35 crc kubenswrapper[4932]: I1125 10:14:35.340074 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-tmn25" Nov 25 10:14:35 crc kubenswrapper[4932]: I1125 10:14:35.460104 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4k4sz\" (UniqueName: \"kubernetes.io/projected/ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90-kube-api-access-4k4sz\") pod \"mariadb-client-5-default\" (UID: \"ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90\") " pod="openstack/mariadb-client-5-default" Nov 25 10:14:35 crc kubenswrapper[4932]: I1125 10:14:35.561665 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4k4sz\" (UniqueName: \"kubernetes.io/projected/ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90-kube-api-access-4k4sz\") pod \"mariadb-client-5-default\" (UID: \"ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90\") " pod="openstack/mariadb-client-5-default" Nov 25 10:14:35 crc kubenswrapper[4932]: I1125 10:14:35.580486 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4k4sz\" (UniqueName: \"kubernetes.io/projected/ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90-kube-api-access-4k4sz\") pod \"mariadb-client-5-default\" (UID: \"ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90\") " pod="openstack/mariadb-client-5-default" Nov 25 10:14:35 crc kubenswrapper[4932]: I1125 10:14:35.653082 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 25 10:14:36 crc kubenswrapper[4932]: I1125 10:14:36.185300 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 25 10:14:36 crc kubenswrapper[4932]: I1125 10:14:36.607811 4932 generic.go:334] "Generic (PLEG): container finished" podID="ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90" containerID="4a47f0c34e0ce8c66ac5b912214f1546ba58ae5593a6dda0d17853d125824c59" exitCode=0 Nov 25 10:14:36 crc kubenswrapper[4932]: I1125 10:14:36.624079 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90","Type":"ContainerDied","Data":"4a47f0c34e0ce8c66ac5b912214f1546ba58ae5593a6dda0d17853d125824c59"} Nov 25 10:14:36 crc kubenswrapper[4932]: I1125 10:14:36.624167 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90","Type":"ContainerStarted","Data":"da105cc71d36ea077373633a08d79d1d4854e994ab889ce5bd9fe6a316471c56"} Nov 25 10:14:37 crc kubenswrapper[4932]: I1125 10:14:37.944256 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 25 10:14:37 crc kubenswrapper[4932]: I1125 10:14:37.965019 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-5-default_ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90/mariadb-client-5-default/0.log" Nov 25 10:14:37 crc kubenswrapper[4932]: I1125 10:14:37.988790 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 25 10:14:37 crc kubenswrapper[4932]: I1125 10:14:37.994465 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.097959 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4k4sz\" (UniqueName: \"kubernetes.io/projected/ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90-kube-api-access-4k4sz\") pod \"ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90\" (UID: \"ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90\") " Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.103851 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90-kube-api-access-4k4sz" (OuterVolumeSpecName: "kube-api-access-4k4sz") pod "ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90" (UID: "ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90"). InnerVolumeSpecName "kube-api-access-4k4sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.131102 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-6-default"] Nov 25 10:14:38 crc kubenswrapper[4932]: E1125 10:14:38.131649 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90" containerName="mariadb-client-5-default" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.131675 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90" containerName="mariadb-client-5-default" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.132740 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90" containerName="mariadb-client-5-default" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.133512 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.137236 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.199988 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-768jq\" (UniqueName: \"kubernetes.io/projected/3a516e80-1b2b-4931-8fe1-0dea1dec11f4-kube-api-access-768jq\") pod \"mariadb-client-6-default\" (UID: \"3a516e80-1b2b-4931-8fe1-0dea1dec11f4\") " pod="openstack/mariadb-client-6-default" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.200095 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4k4sz\" (UniqueName: \"kubernetes.io/projected/ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90-kube-api-access-4k4sz\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.301535 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-768jq\" (UniqueName: \"kubernetes.io/projected/3a516e80-1b2b-4931-8fe1-0dea1dec11f4-kube-api-access-768jq\") pod \"mariadb-client-6-default\" (UID: \"3a516e80-1b2b-4931-8fe1-0dea1dec11f4\") " pod="openstack/mariadb-client-6-default" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.322087 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-768jq\" (UniqueName: \"kubernetes.io/projected/3a516e80-1b2b-4931-8fe1-0dea1dec11f4-kube-api-access-768jq\") pod \"mariadb-client-6-default\" (UID: \"3a516e80-1b2b-4931-8fe1-0dea1dec11f4\") " pod="openstack/mariadb-client-6-default" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.464784 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.618739 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90" path="/var/lib/kubelet/pods/ec58bd87-da20-4b0c-94bd-4ec8e3a9cb90/volumes" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.625693 4932 scope.go:117] "RemoveContainer" containerID="4a47f0c34e0ce8c66ac5b912214f1546ba58ae5593a6dda0d17853d125824c59" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.625741 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 25 10:14:38 crc kubenswrapper[4932]: I1125 10:14:38.974477 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 25 10:14:38 crc kubenswrapper[4932]: W1125 10:14:38.977299 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a516e80_1b2b_4931_8fe1_0dea1dec11f4.slice/crio-dd7b6a034eff0b433b839439561271084f35dd74bec9bd3ec2661007a781eb49 WatchSource:0}: Error finding container dd7b6a034eff0b433b839439561271084f35dd74bec9bd3ec2661007a781eb49: Status 404 returned error can't find the container with id dd7b6a034eff0b433b839439561271084f35dd74bec9bd3ec2661007a781eb49 Nov 25 10:14:39 crc kubenswrapper[4932]: I1125 10:14:39.641378 4932 generic.go:334] "Generic (PLEG): container finished" podID="3a516e80-1b2b-4931-8fe1-0dea1dec11f4" containerID="b17f8147b7816273e9be9b9b4713e69ebbda2ca21b60ff13d304d4717bc94ac4" exitCode=1 Nov 25 10:14:39 crc kubenswrapper[4932]: I1125 10:14:39.641529 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"3a516e80-1b2b-4931-8fe1-0dea1dec11f4","Type":"ContainerDied","Data":"b17f8147b7816273e9be9b9b4713e69ebbda2ca21b60ff13d304d4717bc94ac4"} Nov 25 10:14:39 crc kubenswrapper[4932]: I1125 10:14:39.641934 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"3a516e80-1b2b-4931-8fe1-0dea1dec11f4","Type":"ContainerStarted","Data":"dd7b6a034eff0b433b839439561271084f35dd74bec9bd3ec2661007a781eb49"} Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.032857 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.061462 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-6-default_3a516e80-1b2b-4931-8fe1-0dea1dec11f4/mariadb-client-6-default/0.log" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.090370 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.096973 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.143766 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-768jq\" (UniqueName: \"kubernetes.io/projected/3a516e80-1b2b-4931-8fe1-0dea1dec11f4-kube-api-access-768jq\") pod \"3a516e80-1b2b-4931-8fe1-0dea1dec11f4\" (UID: \"3a516e80-1b2b-4931-8fe1-0dea1dec11f4\") " Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.149754 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a516e80-1b2b-4931-8fe1-0dea1dec11f4-kube-api-access-768jq" (OuterVolumeSpecName: "kube-api-access-768jq") pod "3a516e80-1b2b-4931-8fe1-0dea1dec11f4" (UID: "3a516e80-1b2b-4931-8fe1-0dea1dec11f4"). InnerVolumeSpecName "kube-api-access-768jq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.218955 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-7-default"] Nov 25 10:14:41 crc kubenswrapper[4932]: E1125 10:14:41.219542 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a516e80-1b2b-4931-8fe1-0dea1dec11f4" containerName="mariadb-client-6-default" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.219560 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a516e80-1b2b-4931-8fe1-0dea1dec11f4" containerName="mariadb-client-6-default" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.219922 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a516e80-1b2b-4931-8fe1-0dea1dec11f4" containerName="mariadb-client-6-default" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.221747 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.226080 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.245915 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-768jq\" (UniqueName: \"kubernetes.io/projected/3a516e80-1b2b-4931-8fe1-0dea1dec11f4-kube-api-access-768jq\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.347469 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8phv\" (UniqueName: \"kubernetes.io/projected/49609bab-934f-446a-ab96-d43ca717fa39-kube-api-access-w8phv\") pod \"mariadb-client-7-default\" (UID: \"49609bab-934f-446a-ab96-d43ca717fa39\") " pod="openstack/mariadb-client-7-default" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.449046 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8phv\" (UniqueName: \"kubernetes.io/projected/49609bab-934f-446a-ab96-d43ca717fa39-kube-api-access-w8phv\") pod \"mariadb-client-7-default\" (UID: \"49609bab-934f-446a-ab96-d43ca717fa39\") " pod="openstack/mariadb-client-7-default" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.470237 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8phv\" (UniqueName: \"kubernetes.io/projected/49609bab-934f-446a-ab96-d43ca717fa39-kube-api-access-w8phv\") pod \"mariadb-client-7-default\" (UID: \"49609bab-934f-446a-ab96-d43ca717fa39\") " pod="openstack/mariadb-client-7-default" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.547268 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.677587 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd7b6a034eff0b433b839439561271084f35dd74bec9bd3ec2661007a781eb49" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.677620 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 25 10:14:41 crc kubenswrapper[4932]: I1125 10:14:41.892366 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 25 10:14:42 crc kubenswrapper[4932]: I1125 10:14:42.616615 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a516e80-1b2b-4931-8fe1-0dea1dec11f4" path="/var/lib/kubelet/pods/3a516e80-1b2b-4931-8fe1-0dea1dec11f4/volumes" Nov 25 10:14:42 crc kubenswrapper[4932]: I1125 10:14:42.685404 4932 generic.go:334] "Generic (PLEG): container finished" podID="49609bab-934f-446a-ab96-d43ca717fa39" containerID="795fea842e3bedf0df347aaad77df2c98b53ffeb8bb6208235ba562aafae6026" exitCode=0 Nov 25 10:14:42 crc kubenswrapper[4932]: I1125 10:14:42.685454 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"49609bab-934f-446a-ab96-d43ca717fa39","Type":"ContainerDied","Data":"795fea842e3bedf0df347aaad77df2c98b53ffeb8bb6208235ba562aafae6026"} Nov 25 10:14:42 crc kubenswrapper[4932]: I1125 10:14:42.685486 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"49609bab-934f-446a-ab96-d43ca717fa39","Type":"ContainerStarted","Data":"95eebbfedeb08eafd268a8370577a9fbb80ce76529015596aa50fd063a2024bb"} Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.019429 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.037964 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-7-default_49609bab-934f-446a-ab96-d43ca717fa39/mariadb-client-7-default/0.log" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.066511 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.071150 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.088451 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8phv\" (UniqueName: \"kubernetes.io/projected/49609bab-934f-446a-ab96-d43ca717fa39-kube-api-access-w8phv\") pod \"49609bab-934f-446a-ab96-d43ca717fa39\" (UID: \"49609bab-934f-446a-ab96-d43ca717fa39\") " Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.093881 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49609bab-934f-446a-ab96-d43ca717fa39-kube-api-access-w8phv" (OuterVolumeSpecName: "kube-api-access-w8phv") pod "49609bab-934f-446a-ab96-d43ca717fa39" (UID: "49609bab-934f-446a-ab96-d43ca717fa39"). InnerVolumeSpecName "kube-api-access-w8phv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.190080 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8phv\" (UniqueName: \"kubernetes.io/projected/49609bab-934f-446a-ab96-d43ca717fa39-kube-api-access-w8phv\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.227227 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2"] Nov 25 10:14:44 crc kubenswrapper[4932]: E1125 10:14:44.227584 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49609bab-934f-446a-ab96-d43ca717fa39" containerName="mariadb-client-7-default" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.227599 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="49609bab-934f-446a-ab96-d43ca717fa39" containerName="mariadb-client-7-default" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.227786 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="49609bab-934f-446a-ab96-d43ca717fa39" containerName="mariadb-client-7-default" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.229416 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.235246 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.291372 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8n52t\" (UniqueName: \"kubernetes.io/projected/01f9d5e0-553a-4f29-89a9-fd984d5c2439-kube-api-access-8n52t\") pod \"mariadb-client-2\" (UID: \"01f9d5e0-553a-4f29-89a9-fd984d5c2439\") " pod="openstack/mariadb-client-2" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.392992 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8n52t\" (UniqueName: \"kubernetes.io/projected/01f9d5e0-553a-4f29-89a9-fd984d5c2439-kube-api-access-8n52t\") pod \"mariadb-client-2\" (UID: \"01f9d5e0-553a-4f29-89a9-fd984d5c2439\") " pod="openstack/mariadb-client-2" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.414785 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8n52t\" (UniqueName: \"kubernetes.io/projected/01f9d5e0-553a-4f29-89a9-fd984d5c2439-kube-api-access-8n52t\") pod \"mariadb-client-2\" (UID: \"01f9d5e0-553a-4f29-89a9-fd984d5c2439\") " pod="openstack/mariadb-client-2" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.555982 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.616290 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49609bab-934f-446a-ab96-d43ca717fa39" path="/var/lib/kubelet/pods/49609bab-934f-446a-ab96-d43ca717fa39/volumes" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.699596 4932 scope.go:117] "RemoveContainer" containerID="795fea842e3bedf0df347aaad77df2c98b53ffeb8bb6208235ba562aafae6026" Nov 25 10:14:44 crc kubenswrapper[4932]: I1125 10:14:44.699842 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 25 10:14:45 crc kubenswrapper[4932]: I1125 10:14:45.031040 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 25 10:14:45 crc kubenswrapper[4932]: W1125 10:14:45.034981 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod01f9d5e0_553a_4f29_89a9_fd984d5c2439.slice/crio-11d23c50f43d9adda3c1fd2dfbed9b568dac3da8a7e54c6cc292397ecee5f7e4 WatchSource:0}: Error finding container 11d23c50f43d9adda3c1fd2dfbed9b568dac3da8a7e54c6cc292397ecee5f7e4: Status 404 returned error can't find the container with id 11d23c50f43d9adda3c1fd2dfbed9b568dac3da8a7e54c6cc292397ecee5f7e4 Nov 25 10:14:45 crc kubenswrapper[4932]: I1125 10:14:45.605719 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" Nov 25 10:14:45 crc kubenswrapper[4932]: E1125 10:14:45.606327 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:14:45 crc kubenswrapper[4932]: I1125 10:14:45.715062 4932 generic.go:334] "Generic (PLEG): container finished" podID="01f9d5e0-553a-4f29-89a9-fd984d5c2439" containerID="0918a9b87d80726389b5b445fc8070e80178521c51ad4a58ebdbb78c2cda4e56" exitCode=0 Nov 25 10:14:45 crc kubenswrapper[4932]: I1125 10:14:45.715110 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"01f9d5e0-553a-4f29-89a9-fd984d5c2439","Type":"ContainerDied","Data":"0918a9b87d80726389b5b445fc8070e80178521c51ad4a58ebdbb78c2cda4e56"} Nov 25 10:14:45 crc kubenswrapper[4932]: I1125 10:14:45.715140 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"01f9d5e0-553a-4f29-89a9-fd984d5c2439","Type":"ContainerStarted","Data":"11d23c50f43d9adda3c1fd2dfbed9b568dac3da8a7e54c6cc292397ecee5f7e4"} Nov 25 10:14:47 crc kubenswrapper[4932]: I1125 10:14:47.068274 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2" Nov 25 10:14:47 crc kubenswrapper[4932]: I1125 10:14:47.085631 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2_01f9d5e0-553a-4f29-89a9-fd984d5c2439/mariadb-client-2/0.log" Nov 25 10:14:47 crc kubenswrapper[4932]: I1125 10:14:47.110709 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2"] Nov 25 10:14:47 crc kubenswrapper[4932]: I1125 10:14:47.117401 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2"] Nov 25 10:14:47 crc kubenswrapper[4932]: I1125 10:14:47.132180 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8n52t\" (UniqueName: \"kubernetes.io/projected/01f9d5e0-553a-4f29-89a9-fd984d5c2439-kube-api-access-8n52t\") pod \"01f9d5e0-553a-4f29-89a9-fd984d5c2439\" (UID: \"01f9d5e0-553a-4f29-89a9-fd984d5c2439\") " Nov 25 10:14:47 crc kubenswrapper[4932]: I1125 10:14:47.137643 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01f9d5e0-553a-4f29-89a9-fd984d5c2439-kube-api-access-8n52t" (OuterVolumeSpecName: "kube-api-access-8n52t") pod "01f9d5e0-553a-4f29-89a9-fd984d5c2439" (UID: "01f9d5e0-553a-4f29-89a9-fd984d5c2439"). InnerVolumeSpecName "kube-api-access-8n52t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:14:47 crc kubenswrapper[4932]: I1125 10:14:47.233398 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8n52t\" (UniqueName: \"kubernetes.io/projected/01f9d5e0-553a-4f29-89a9-fd984d5c2439-kube-api-access-8n52t\") on node \"crc\" DevicePath \"\"" Nov 25 10:14:47 crc kubenswrapper[4932]: I1125 10:14:47.734294 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="11d23c50f43d9adda3c1fd2dfbed9b568dac3da8a7e54c6cc292397ecee5f7e4" Nov 25 10:14:47 crc kubenswrapper[4932]: I1125 10:14:47.734335 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2" Nov 25 10:14:48 crc kubenswrapper[4932]: I1125 10:14:48.617293 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01f9d5e0-553a-4f29-89a9-fd984d5c2439" path="/var/lib/kubelet/pods/01f9d5e0-553a-4f29-89a9-fd984d5c2439/volumes" Nov 25 10:14:58 crc kubenswrapper[4932]: I1125 10:14:58.605717 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" Nov 25 10:14:58 crc kubenswrapper[4932]: E1125 10:14:58.612247 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.147272 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb"] Nov 25 10:15:00 crc kubenswrapper[4932]: E1125 10:15:00.147676 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01f9d5e0-553a-4f29-89a9-fd984d5c2439" containerName="mariadb-client-2" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.147693 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="01f9d5e0-553a-4f29-89a9-fd984d5c2439" containerName="mariadb-client-2" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.147859 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="01f9d5e0-553a-4f29-89a9-fd984d5c2439" containerName="mariadb-client-2" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.148436 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.151366 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.151413 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.158433 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb"] Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.276622 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5pzr\" (UniqueName: \"kubernetes.io/projected/39a689b7-0688-4234-9dd8-4482c9ef03f7-kube-api-access-j5pzr\") pod \"collect-profiles-29401095-5bmhb\" (UID: \"39a689b7-0688-4234-9dd8-4482c9ef03f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.276717 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39a689b7-0688-4234-9dd8-4482c9ef03f7-config-volume\") pod \"collect-profiles-29401095-5bmhb\" (UID: \"39a689b7-0688-4234-9dd8-4482c9ef03f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.276749 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/39a689b7-0688-4234-9dd8-4482c9ef03f7-secret-volume\") pod \"collect-profiles-29401095-5bmhb\" (UID: \"39a689b7-0688-4234-9dd8-4482c9ef03f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.378354 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39a689b7-0688-4234-9dd8-4482c9ef03f7-config-volume\") pod \"collect-profiles-29401095-5bmhb\" (UID: \"39a689b7-0688-4234-9dd8-4482c9ef03f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.378421 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/39a689b7-0688-4234-9dd8-4482c9ef03f7-secret-volume\") pod \"collect-profiles-29401095-5bmhb\" (UID: \"39a689b7-0688-4234-9dd8-4482c9ef03f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.378489 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5pzr\" (UniqueName: \"kubernetes.io/projected/39a689b7-0688-4234-9dd8-4482c9ef03f7-kube-api-access-j5pzr\") pod \"collect-profiles-29401095-5bmhb\" (UID: \"39a689b7-0688-4234-9dd8-4482c9ef03f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.379466 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39a689b7-0688-4234-9dd8-4482c9ef03f7-config-volume\") pod 
\"collect-profiles-29401095-5bmhb\" (UID: \"39a689b7-0688-4234-9dd8-4482c9ef03f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.385349 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/39a689b7-0688-4234-9dd8-4482c9ef03f7-secret-volume\") pod \"collect-profiles-29401095-5bmhb\" (UID: \"39a689b7-0688-4234-9dd8-4482c9ef03f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.395945 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5pzr\" (UniqueName: \"kubernetes.io/projected/39a689b7-0688-4234-9dd8-4482c9ef03f7-kube-api-access-j5pzr\") pod \"collect-profiles-29401095-5bmhb\" (UID: \"39a689b7-0688-4234-9dd8-4482c9ef03f7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.470482 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:00 crc kubenswrapper[4932]: I1125 10:15:00.911474 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb"] Nov 25 10:15:01 crc kubenswrapper[4932]: I1125 10:15:01.841816 4932 generic.go:334] "Generic (PLEG): container finished" podID="39a689b7-0688-4234-9dd8-4482c9ef03f7" containerID="89e31562136feb2c0d42ae343294db22e2a1a8aaa064a32443787b36e1c8516b" exitCode=0 Nov 25 10:15:01 crc kubenswrapper[4932]: I1125 10:15:01.841909 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" event={"ID":"39a689b7-0688-4234-9dd8-4482c9ef03f7","Type":"ContainerDied","Data":"89e31562136feb2c0d42ae343294db22e2a1a8aaa064a32443787b36e1c8516b"} Nov 25 10:15:01 crc kubenswrapper[4932]: I1125 10:15:01.842156 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" event={"ID":"39a689b7-0688-4234-9dd8-4482c9ef03f7","Type":"ContainerStarted","Data":"250791c682a31f454738e937f811d6378c01401856aeac622d0b6fdb3e0349a1"} Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.149596 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.267724 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39a689b7-0688-4234-9dd8-4482c9ef03f7-config-volume\") pod \"39a689b7-0688-4234-9dd8-4482c9ef03f7\" (UID: \"39a689b7-0688-4234-9dd8-4482c9ef03f7\") " Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.267844 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/39a689b7-0688-4234-9dd8-4482c9ef03f7-secret-volume\") pod \"39a689b7-0688-4234-9dd8-4482c9ef03f7\" (UID: \"39a689b7-0688-4234-9dd8-4482c9ef03f7\") " Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.268028 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5pzr\" (UniqueName: \"kubernetes.io/projected/39a689b7-0688-4234-9dd8-4482c9ef03f7-kube-api-access-j5pzr\") pod \"39a689b7-0688-4234-9dd8-4482c9ef03f7\" (UID: \"39a689b7-0688-4234-9dd8-4482c9ef03f7\") " Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.268290 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39a689b7-0688-4234-9dd8-4482c9ef03f7-config-volume" (OuterVolumeSpecName: "config-volume") pod "39a689b7-0688-4234-9dd8-4482c9ef03f7" (UID: "39a689b7-0688-4234-9dd8-4482c9ef03f7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.268426 4932 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39a689b7-0688-4234-9dd8-4482c9ef03f7-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.273202 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39a689b7-0688-4234-9dd8-4482c9ef03f7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "39a689b7-0688-4234-9dd8-4482c9ef03f7" (UID: "39a689b7-0688-4234-9dd8-4482c9ef03f7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.273386 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39a689b7-0688-4234-9dd8-4482c9ef03f7-kube-api-access-j5pzr" (OuterVolumeSpecName: "kube-api-access-j5pzr") pod "39a689b7-0688-4234-9dd8-4482c9ef03f7" (UID: "39a689b7-0688-4234-9dd8-4482c9ef03f7"). InnerVolumeSpecName "kube-api-access-j5pzr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.370326 4932 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/39a689b7-0688-4234-9dd8-4482c9ef03f7-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.370418 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5pzr\" (UniqueName: \"kubernetes.io/projected/39a689b7-0688-4234-9dd8-4482c9ef03f7-kube-api-access-j5pzr\") on node \"crc\" DevicePath \"\"" Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.858122 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" event={"ID":"39a689b7-0688-4234-9dd8-4482c9ef03f7","Type":"ContainerDied","Data":"250791c682a31f454738e937f811d6378c01401856aeac622d0b6fdb3e0349a1"} Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.858163 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="250791c682a31f454738e937f811d6378c01401856aeac622d0b6fdb3e0349a1" Nov 25 10:15:03 crc kubenswrapper[4932]: I1125 10:15:03.858161 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb" Nov 25 10:15:04 crc kubenswrapper[4932]: I1125 10:15:04.223736 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d"] Nov 25 10:15:04 crc kubenswrapper[4932]: I1125 10:15:04.232777 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401050-4n64d"] Nov 25 10:15:04 crc kubenswrapper[4932]: I1125 10:15:04.620177 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a398f8a1-0132-4daf-b96b-885c2c15bcfa" path="/var/lib/kubelet/pods/a398f8a1-0132-4daf-b96b-885c2c15bcfa/volumes" Nov 25 10:15:08 crc kubenswrapper[4932]: I1125 10:15:08.956113 4932 scope.go:117] "RemoveContainer" containerID="cf61ff54fe204ad9b467ac67a8bcb270edcb42989d6d6f38225cd11a86530e32" Nov 25 10:15:13 crc kubenswrapper[4932]: I1125 10:15:13.605618 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" Nov 25 10:15:13 crc kubenswrapper[4932]: E1125 10:15:13.606298 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:15:27 crc kubenswrapper[4932]: I1125 10:15:27.607337 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" Nov 25 10:15:27 crc kubenswrapper[4932]: E1125 10:15:27.608613 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 
Nov 25 10:15:13 crc kubenswrapper[4932]: I1125 10:15:13.605618 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:15:13 crc kubenswrapper[4932]: E1125 10:15:13.606298 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:15:27 crc kubenswrapper[4932]: I1125 10:15:27.607337 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:15:27 crc kubenswrapper[4932]: E1125 10:15:27.608613 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:15:41 crc kubenswrapper[4932]: I1125 10:15:41.608452 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:15:41 crc kubenswrapper[4932]: E1125 10:15:41.609882 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:15:53 crc kubenswrapper[4932]: I1125 10:15:53.606750 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:15:53 crc kubenswrapper[4932]: E1125 10:15:53.607493 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:16:06 crc kubenswrapper[4932]: I1125 10:16:06.606143 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:16:06 crc kubenswrapper[4932]: E1125 10:16:06.607101 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:16:09 crc kubenswrapper[4932]: I1125 10:16:09.039873 4932 scope.go:117] "RemoveContainer" containerID="7667aedad8afe934f6d67cb341e47ee4b49f6256a77beb6a1ee682d4be1c175e"
Nov 25 10:16:19 crc kubenswrapper[4932]: I1125 10:16:19.606103 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:16:19 crc kubenswrapper[4932]: E1125 10:16:19.607073 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:16:34 crc kubenswrapper[4932]: I1125 10:16:34.606155 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:16:34 crc kubenswrapper[4932]: E1125 10:16:34.607233 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:16:46 crc kubenswrapper[4932]: I1125 10:16:46.608094 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:16:46 crc kubenswrapper[4932]: E1125 10:16:46.609970 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:17:01 crc kubenswrapper[4932]: I1125 10:17:01.607027 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:17:01 crc kubenswrapper[4932]: E1125 10:17:01.608652 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:17:14 crc kubenswrapper[4932]: I1125 10:17:14.606043 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:17:14 crc kubenswrapper[4932]: E1125 10:17:14.606873 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:17:27 crc kubenswrapper[4932]: I1125 10:17:27.606731 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:17:27 crc kubenswrapper[4932]: E1125 10:17:27.608332 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:17:38 crc kubenswrapper[4932]: I1125 10:17:38.607531 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f"
Nov 25 10:17:39 crc kubenswrapper[4932]: I1125 10:17:39.292579 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"5afb8b5224b9f6d2fa11c4acc1cecf78fcbf4f0caae59d8d4bbde36efe0769d9"}
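The repeated pairs above show the kubelet refusing to restart machine-config-daemon while it is in CrashLoopBackOff; the messages themselves show the restart delay at its 5m0s cap, until the container finally starts at 10:17:39. An illustrative Go sketch of how a container reaches that state, assuming the commonly cited kubelet defaults of a 10s initial delay doubling per restart (the 10s base is an assumption; the 5m cap is visible in the log itself):

// Illustrative only, not kubelet code: crash-loop restart delay doubling
// from an assumed 10s base, capped at the 5m shown in the log.
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initial  = 10 * time.Second
		maxDelay = 5 * time.Minute
	)
	delay := initial
	for restart := 1; restart <= 8; restart++ {
		fmt.Printf("restart %d: back-off %s\n", restart, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay // matches "back-off 5m0s restarting failed container=..."
		}
	}
}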
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.015154 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-copy-data"]
Nov 25 10:18:07 crc kubenswrapper[4932]: E1125 10:18:07.016528 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39a689b7-0688-4234-9dd8-4482c9ef03f7" containerName="collect-profiles"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.016544 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="39a689b7-0688-4234-9dd8-4482c9ef03f7" containerName="collect-profiles"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.016727 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="39a689b7-0688-4234-9dd8-4482c9ef03f7" containerName="collect-profiles"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.017239 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.020448 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-tmn25"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.025461 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"]
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.077429 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-0854dd73-17a2-4cf6-84f6-05865ac5e0c9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0854dd73-17a2-4cf6-84f6-05865ac5e0c9\") pod \"mariadb-copy-data\" (UID: \"02ffb32b-ab46-4a26-9fde-94b40250534d\") " pod="openstack/mariadb-copy-data"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.077502 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccvt5\" (UniqueName: \"kubernetes.io/projected/02ffb32b-ab46-4a26-9fde-94b40250534d-kube-api-access-ccvt5\") pod \"mariadb-copy-data\" (UID: \"02ffb32b-ab46-4a26-9fde-94b40250534d\") " pod="openstack/mariadb-copy-data"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.179251 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-0854dd73-17a2-4cf6-84f6-05865ac5e0c9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0854dd73-17a2-4cf6-84f6-05865ac5e0c9\") pod \"mariadb-copy-data\" (UID: \"02ffb32b-ab46-4a26-9fde-94b40250534d\") " pod="openstack/mariadb-copy-data"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.179329 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccvt5\" (UniqueName: \"kubernetes.io/projected/02ffb32b-ab46-4a26-9fde-94b40250534d-kube-api-access-ccvt5\") pod \"mariadb-copy-data\" (UID: \"02ffb32b-ab46-4a26-9fde-94b40250534d\") " pod="openstack/mariadb-copy-data"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.182849 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.182904 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-0854dd73-17a2-4cf6-84f6-05865ac5e0c9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0854dd73-17a2-4cf6-84f6-05865ac5e0c9\") pod \"mariadb-copy-data\" (UID: \"02ffb32b-ab46-4a26-9fde-94b40250534d\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/124ddc74f5e48ee0106b2f6db75259fb77d137d1fa0142205b2fce6e394878d3/globalmount\"" pod="openstack/mariadb-copy-data"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.204538 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccvt5\" (UniqueName: \"kubernetes.io/projected/02ffb32b-ab46-4a26-9fde-94b40250534d-kube-api-access-ccvt5\") pod \"mariadb-copy-data\" (UID: \"02ffb32b-ab46-4a26-9fde-94b40250534d\") " pod="openstack/mariadb-copy-data"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.217690 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-0854dd73-17a2-4cf6-84f6-05865ac5e0c9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-0854dd73-17a2-4cf6-84f6-05865ac5e0c9\") pod \"mariadb-copy-data\" (UID: \"02ffb32b-ab46-4a26-9fde-94b40250534d\") " pod="openstack/mariadb-copy-data"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.374545 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data"
Nov 25 10:18:07 crc kubenswrapper[4932]: I1125 10:18:07.894696 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"]
Nov 25 10:18:08 crc kubenswrapper[4932]: I1125 10:18:08.579534 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"02ffb32b-ab46-4a26-9fde-94b40250534d","Type":"ContainerStarted","Data":"07b86332171771de807b7b6b7e554b5888eecef85e7233ce4bbe9522a24cd794"}
Nov 25 10:18:08 crc kubenswrapper[4932]: I1125 10:18:08.579581 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"02ffb32b-ab46-4a26-9fde-94b40250534d","Type":"ContainerStarted","Data":"d4d9fb694ad296479f80e2ddeeca5b29225b587c09eb6a5f37201b34a14e03da"}
Nov 25 10:18:08 crc kubenswrapper[4932]: I1125 10:18:08.596680 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=3.596658513 podStartE2EDuration="3.596658513s" podCreationTimestamp="2025-11-25 10:18:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:18:08.592304228 +0000 UTC m=+5348.718333781" watchObservedRunningTime="2025-11-25 10:18:08.596658513 +0000 UTC m=+5348.722688076"
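The "Observed pod startup duration" entry above reports podStartE2EDuration as the gap between podCreationTimestamp and the watch-observed running time (the zero firstStartedPulling/lastFinishedPulling values indicate no image pull was timed). A quick Go check of that arithmetic using the two timestamps from the entry itself:

// Verifies the reported 3.596658513s using the timestamps in the entry above.
package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2025-11-25 10:18:05 +0000 UTC")             // podCreationTimestamp
	running, _ := time.Parse(layout, "2025-11-25 10:18:08.596658513 +0000 UTC")   // watchObservedRunningTime
	fmt.Println(running.Sub(created)) // 3.596658513s, matching podStartE2EDuration
}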
Need to start a new one" pod="openstack/mariadb-client" Nov 25 10:18:11 crc kubenswrapper[4932]: I1125 10:18:11.768327 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 25 10:18:11 crc kubenswrapper[4932]: I1125 10:18:11.955388 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w67hv\" (UniqueName: \"kubernetes.io/projected/90115657-4793-423c-ac9a-63a2e2e6c861-kube-api-access-w67hv\") pod \"mariadb-client\" (UID: \"90115657-4793-423c-ac9a-63a2e2e6c861\") " pod="openstack/mariadb-client" Nov 25 10:18:12 crc kubenswrapper[4932]: I1125 10:18:12.056915 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w67hv\" (UniqueName: \"kubernetes.io/projected/90115657-4793-423c-ac9a-63a2e2e6c861-kube-api-access-w67hv\") pod \"mariadb-client\" (UID: \"90115657-4793-423c-ac9a-63a2e2e6c861\") " pod="openstack/mariadb-client" Nov 25 10:18:12 crc kubenswrapper[4932]: I1125 10:18:12.077023 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w67hv\" (UniqueName: \"kubernetes.io/projected/90115657-4793-423c-ac9a-63a2e2e6c861-kube-api-access-w67hv\") pod \"mariadb-client\" (UID: \"90115657-4793-423c-ac9a-63a2e2e6c861\") " pod="openstack/mariadb-client" Nov 25 10:18:12 crc kubenswrapper[4932]: I1125 10:18:12.088313 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 25 10:18:12 crc kubenswrapper[4932]: I1125 10:18:12.516643 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 25 10:18:12 crc kubenswrapper[4932]: I1125 10:18:12.639990 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"90115657-4793-423c-ac9a-63a2e2e6c861","Type":"ContainerStarted","Data":"db8b218bc0159a423cc29c43e732a883a73cfcf7a66f8e55ea7adec32dc656f2"} Nov 25 10:18:13 crc kubenswrapper[4932]: I1125 10:18:13.622120 4932 generic.go:334] "Generic (PLEG): container finished" podID="90115657-4793-423c-ac9a-63a2e2e6c861" containerID="6eabe436fa2bc424ca4a5638346fb7571990122e9d64fda0b7fedc0837baa3a0" exitCode=0 Nov 25 10:18:13 crc kubenswrapper[4932]: I1125 10:18:13.622171 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"90115657-4793-423c-ac9a-63a2e2e6c861","Type":"ContainerDied","Data":"6eabe436fa2bc424ca4a5638346fb7571990122e9d64fda0b7fedc0837baa3a0"} Nov 25 10:18:14 crc kubenswrapper[4932]: I1125 10:18:14.941244 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Nov 25 10:18:14 crc kubenswrapper[4932]: I1125 10:18:14.978138 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_90115657-4793-423c-ac9a-63a2e2e6c861/mariadb-client/0.log" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.010062 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.016838 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.106991 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w67hv\" (UniqueName: \"kubernetes.io/projected/90115657-4793-423c-ac9a-63a2e2e6c861-kube-api-access-w67hv\") pod \"90115657-4793-423c-ac9a-63a2e2e6c861\" (UID: \"90115657-4793-423c-ac9a-63a2e2e6c861\") " Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.113364 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90115657-4793-423c-ac9a-63a2e2e6c861-kube-api-access-w67hv" (OuterVolumeSpecName: "kube-api-access-w67hv") pod "90115657-4793-423c-ac9a-63a2e2e6c861" (UID: "90115657-4793-423c-ac9a-63a2e2e6c861"). InnerVolumeSpecName "kube-api-access-w67hv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.160697 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Nov 25 10:18:15 crc kubenswrapper[4932]: E1125 10:18:15.161088 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90115657-4793-423c-ac9a-63a2e2e6c861" containerName="mariadb-client" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.161110 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="90115657-4793-423c-ac9a-63a2e2e6c861" containerName="mariadb-client" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.161335 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="90115657-4793-423c-ac9a-63a2e2e6c861" containerName="mariadb-client" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.161958 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.175692 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.208863 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w67hv\" (UniqueName: \"kubernetes.io/projected/90115657-4793-423c-ac9a-63a2e2e6c861-kube-api-access-w67hv\") on node \"crc\" DevicePath \"\"" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.310500 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5qjm\" (UniqueName: \"kubernetes.io/projected/de1c4436-d2d3-4908-842a-7b3db13dabde-kube-api-access-h5qjm\") pod \"mariadb-client\" (UID: \"de1c4436-d2d3-4908-842a-7b3db13dabde\") " pod="openstack/mariadb-client" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.412671 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5qjm\" (UniqueName: \"kubernetes.io/projected/de1c4436-d2d3-4908-842a-7b3db13dabde-kube-api-access-h5qjm\") pod \"mariadb-client\" (UID: \"de1c4436-d2d3-4908-842a-7b3db13dabde\") " pod="openstack/mariadb-client" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.444660 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5qjm\" (UniqueName: \"kubernetes.io/projected/de1c4436-d2d3-4908-842a-7b3db13dabde-kube-api-access-h5qjm\") pod \"mariadb-client\" (UID: \"de1c4436-d2d3-4908-842a-7b3db13dabde\") " pod="openstack/mariadb-client" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.482904 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.650156 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db8b218bc0159a423cc29c43e732a883a73cfcf7a66f8e55ea7adec32dc656f2" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.650252 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.670642 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/mariadb-client" oldPodUID="90115657-4793-423c-ac9a-63a2e2e6c861" podUID="de1c4436-d2d3-4908-842a-7b3db13dabde" Nov 25 10:18:15 crc kubenswrapper[4932]: I1125 10:18:15.923215 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 25 10:18:16 crc kubenswrapper[4932]: I1125 10:18:16.616499 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90115657-4793-423c-ac9a-63a2e2e6c861" path="/var/lib/kubelet/pods/90115657-4793-423c-ac9a-63a2e2e6c861/volumes" Nov 25 10:18:16 crc kubenswrapper[4932]: I1125 10:18:16.659919 4932 generic.go:334] "Generic (PLEG): container finished" podID="de1c4436-d2d3-4908-842a-7b3db13dabde" containerID="9c608aa597a75669ec401dbd68ad9df1b92e633d4b32cff12025bcf9af01bf84" exitCode=0 Nov 25 10:18:16 crc kubenswrapper[4932]: I1125 10:18:16.659993 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"de1c4436-d2d3-4908-842a-7b3db13dabde","Type":"ContainerDied","Data":"9c608aa597a75669ec401dbd68ad9df1b92e633d4b32cff12025bcf9af01bf84"} Nov 25 10:18:16 crc kubenswrapper[4932]: I1125 10:18:16.660075 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"de1c4436-d2d3-4908-842a-7b3db13dabde","Type":"ContainerStarted","Data":"ee738bbde3b89567cb85d07b60a8bc304ce3f9e8a8cdb9338d8a7d016ea93a34"} Nov 25 10:18:17 crc kubenswrapper[4932]: I1125 10:18:17.966917 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 25 10:18:17 crc kubenswrapper[4932]: I1125 10:18:17.987398 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_de1c4436-d2d3-4908-842a-7b3db13dabde/mariadb-client/0.log" Nov 25 10:18:18 crc kubenswrapper[4932]: I1125 10:18:18.022571 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Nov 25 10:18:18 crc kubenswrapper[4932]: I1125 10:18:18.029426 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Nov 25 10:18:18 crc kubenswrapper[4932]: I1125 10:18:18.060596 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5qjm\" (UniqueName: \"kubernetes.io/projected/de1c4436-d2d3-4908-842a-7b3db13dabde-kube-api-access-h5qjm\") pod \"de1c4436-d2d3-4908-842a-7b3db13dabde\" (UID: \"de1c4436-d2d3-4908-842a-7b3db13dabde\") " Nov 25 10:18:18 crc kubenswrapper[4932]: I1125 10:18:18.069542 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de1c4436-d2d3-4908-842a-7b3db13dabde-kube-api-access-h5qjm" (OuterVolumeSpecName: "kube-api-access-h5qjm") pod "de1c4436-d2d3-4908-842a-7b3db13dabde" (UID: "de1c4436-d2d3-4908-842a-7b3db13dabde"). InnerVolumeSpecName "kube-api-access-h5qjm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:18:18 crc kubenswrapper[4932]: I1125 10:18:18.162923 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5qjm\" (UniqueName: \"kubernetes.io/projected/de1c4436-d2d3-4908-842a-7b3db13dabde-kube-api-access-h5qjm\") on node \"crc\" DevicePath \"\"" Nov 25 10:18:18 crc kubenswrapper[4932]: I1125 10:18:18.631729 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de1c4436-d2d3-4908-842a-7b3db13dabde" path="/var/lib/kubelet/pods/de1c4436-d2d3-4908-842a-7b3db13dabde/volumes" Nov 25 10:18:18 crc kubenswrapper[4932]: I1125 10:18:18.677956 4932 scope.go:117] "RemoveContainer" containerID="9c608aa597a75669ec401dbd68ad9df1b92e633d4b32cff12025bcf9af01bf84" Nov 25 10:18:18 crc kubenswrapper[4932]: I1125 10:18:18.678077 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.038397 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 10:19:14 crc kubenswrapper[4932]: E1125 10:19:14.039452 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de1c4436-d2d3-4908-842a-7b3db13dabde" containerName="mariadb-client" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.039469 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="de1c4436-d2d3-4908-842a-7b3db13dabde" containerName="mariadb-client" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.040443 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="de1c4436-d2d3-4908-842a-7b3db13dabde" containerName="mariadb-client" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.041257 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.050057 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.050649 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.050925 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-s2fkz" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.051068 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.052344 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.057690 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.068301 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.070698 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.077132 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.079075 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.084348 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.091206 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194444 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a31e476e-0a99-440e-8d70-9afa3f28468f-config\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194548 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1dc7eead-6ba1-477d-8815-fd11d101f5b3-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194574 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zl4xw\" (UniqueName: \"kubernetes.io/projected/a31e476e-0a99-440e-8d70-9afa3f28468f-kube-api-access-zl4xw\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194640 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-e798c536-e864-4764-bfe5-219d0712eac8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e798c536-e864-4764-bfe5-219d0712eac8\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194689 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a31e476e-0a99-440e-8d70-9afa3f28468f-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194716 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194765 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzqbz\" (UniqueName: \"kubernetes.io/projected/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-kube-api-access-qzqbz\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194798 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31e476e-0a99-440e-8d70-9afa3f28468f-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194815 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cgw5\" (UniqueName: \"kubernetes.io/projected/1dc7eead-6ba1-477d-8815-fd11d101f5b3-kube-api-access-2cgw5\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194856 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194875 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194914 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc7eead-6ba1-477d-8815-fd11d101f5b3-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194941 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc7eead-6ba1-477d-8815-fd11d101f5b3-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.194962 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-84600639-14e1-493d-829d-613c8dd4bc2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-84600639-14e1-493d-829d-613c8dd4bc2f\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.195005 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a31e476e-0a99-440e-8d70-9afa3f28468f-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.195035 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1dc7eead-6ba1-477d-8815-fd11d101f5b3-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.195074 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.195109 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.195130 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1dc7eead-6ba1-477d-8815-fd11d101f5b3-config\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.195166 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dc7eead-6ba1-477d-8815-fd11d101f5b3-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.195208 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-config\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.195237 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31e476e-0a99-440e-8d70-9afa3f28468f-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.195283 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-385350b4-b402-45d6-bf3a-b286f1b9ecfc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-385350b4-b402-45d6-bf3a-b286f1b9ecfc\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.195310 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31e476e-0a99-440e-8d70-9afa3f28468f-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.263553 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.265315 4932 util.go:30] "No sandbox for pod can be found. 
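Across the three ovsdbserver-sb replicas above, each volume is logged through the same progression: VerifyControllerAttachedVolume started (reconciler_common.go:245), MountVolume started (reconciler_common.go:218), then MountVolume.SetUp succeeded (operation_generator.go:637). A conceptual Go sketch (not kubelet code; keys and events are hard-coded for illustration) of tracking the last observed phase per (pod, volume) during log triage to spot volumes that never reach SetUp:

// Conceptual log-triage sketch for the verify -> mount -> setup progression.
package main

import "fmt"

type phase int

const (
	verified phase = iota + 1 // "VerifyControllerAttachedVolume started"
	mounting                  // "operationExecutor.MountVolume started"
	mounted                   // "MountVolume.SetUp succeeded"
)

func main() {
	last := map[string]phase{}
	// In a real pass these would be parsed out of the log lines above.
	events := []struct {
		key string
		p   phase
	}{
		{"ovsdbserver-sb-2/config", verified},
		{"ovsdbserver-sb-2/config", mounting},
		{"ovsdbserver-sb-2/config", mounted},
		{"ovsdbserver-sb-2/ovsdbserver-sb-tls-certs", verified},
		{"ovsdbserver-sb-2/ovsdbserver-sb-tls-certs", mounting},
	}
	for _, e := range events {
		if e.p > last[e.key] {
			last[e.key] = e.p
		}
	}
	for k, p := range last {
		if p < mounted {
			fmt.Printf("volume %s stalled at phase %d\n", k, p)
		}
	}
}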
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.270035 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.270315 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.270494 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-dgtlx" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.270648 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.285150 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.287091 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.297516 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-config\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.297773 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31e476e-0a99-440e-8d70-9afa3f28468f-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.297854 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-385350b4-b402-45d6-bf3a-b286f1b9ecfc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-385350b4-b402-45d6-bf3a-b286f1b9ecfc\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.297945 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31e476e-0a99-440e-8d70-9afa3f28468f-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298026 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a31e476e-0a99-440e-8d70-9afa3f28468f-config\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298093 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1dc7eead-6ba1-477d-8815-fd11d101f5b3-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298163 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zl4xw\" (UniqueName: 
\"kubernetes.io/projected/a31e476e-0a99-440e-8d70-9afa3f28468f-kube-api-access-zl4xw\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298272 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-e798c536-e864-4764-bfe5-219d0712eac8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e798c536-e864-4764-bfe5-219d0712eac8\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298350 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a31e476e-0a99-440e-8d70-9afa3f28468f-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298453 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298547 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzqbz\" (UniqueName: \"kubernetes.io/projected/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-kube-api-access-qzqbz\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298629 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31e476e-0a99-440e-8d70-9afa3f28468f-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298697 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cgw5\" (UniqueName: \"kubernetes.io/projected/1dc7eead-6ba1-477d-8815-fd11d101f5b3-kube-api-access-2cgw5\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298772 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298854 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298925 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc7eead-6ba1-477d-8815-fd11d101f5b3-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " 
pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.298999 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc7eead-6ba1-477d-8815-fd11d101f5b3-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.299068 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-84600639-14e1-493d-829d-613c8dd4bc2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-84600639-14e1-493d-829d-613c8dd4bc2f\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.299150 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a31e476e-0a99-440e-8d70-9afa3f28468f-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.299253 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1dc7eead-6ba1-477d-8815-fd11d101f5b3-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.299331 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.299413 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1dc7eead-6ba1-477d-8815-fd11d101f5b3-config\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.299488 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.299573 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dc7eead-6ba1-477d-8815-fd11d101f5b3-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.300668 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/a31e476e-0a99-440e-8d70-9afa3f28468f-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.300988 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/a31e476e-0a99-440e-8d70-9afa3f28468f-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.301495 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-config\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.301632 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1dc7eead-6ba1-477d-8815-fd11d101f5b3-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.302803 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1dc7eead-6ba1-477d-8815-fd11d101f5b3-config\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.303558 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.304984 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.307599 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.307644 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-e798c536-e864-4764-bfe5-219d0712eac8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e798c536-e864-4764-bfe5-219d0712eac8\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/37c510f55f30b5f64ea8c339cadb209bedeacbca5cca86333d16f529544ca023/globalmount\"" pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.307986 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1dc7eead-6ba1-477d-8815-fd11d101f5b3-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.308266 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.308289 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-84600639-14e1-493d-829d-613c8dd4bc2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-84600639-14e1-493d-829d-613c8dd4bc2f\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/08bf6a6720060dcbe4275f54e08a751bf2b25e8731b52bed8041874f004f6255/globalmount\"" pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.308391 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.308418 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-385350b4-b402-45d6-bf3a-b286f1b9ecfc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-385350b4-b402-45d6-bf3a-b286f1b9ecfc\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b23ef0beb1145c1b55621fed25c1604bbb88fdc396e72bc68e150fa6458533f2/globalmount\"" pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.312455 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a31e476e-0a99-440e-8d70-9afa3f28468f-config\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.315886 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.318073 4932 util.go:30] "No sandbox for pod can be found. 
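The csi_attacher.go:380 lines above show MountDevice (the NodeStageVolume step) being skipped for each hostpath-provisioner PVC because the driver does not advertise the STAGE_UNSTAGE_VOLUME node capability, after which the kubelet records MountDevice as succeeded with only the global mount path and proceeds to SetUp (NodePublishVolume). A conceptual Go sketch of that gate (capability name per the CSI spec; the code is illustrative, not the kubelet implementation):

// Conceptual sketch of the capability gate behind the lines above.
package main

import "fmt"

type nodeCapability string

const stageUnstageVolume nodeCapability = "STAGE_UNSTAGE_VOLUME"

func mountDevice(driverCaps map[nodeCapability]bool, volume string) {
	if !driverCaps[stageUnstageVolume] {
		fmt.Printf("%s: STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...\n", volume)
		return // then logged as "MountVolume.MountDevice succeeded" with just the global mount path
	}
	fmt.Printf("%s: calling NodeStageVolume\n", volume)
}

func main() {
	hostpathCaps := map[nodeCapability]bool{} // kubevirt.io.hostpath-provisioner: no staging capability
	mountDevice(hostpathCaps, "pvc-84600639-14e1-493d-829d-613c8dd4bc2f")
}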
Need to start a new one" pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.337457 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31e476e-0a99-440e-8d70-9afa3f28468f-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.337932 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a31e476e-0a99-440e-8d70-9afa3f28468f-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.338549 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.341004 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.344481 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a31e476e-0a99-440e-8d70-9afa3f28468f-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.345252 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzqbz\" (UniqueName: \"kubernetes.io/projected/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-kube-api-access-qzqbz\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.345304 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd08d3e4-3584-48f7-81bb-5e68d8dbee06-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.345491 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dc7eead-6ba1-477d-8815-fd11d101f5b3-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.346758 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zl4xw\" (UniqueName: \"kubernetes.io/projected/a31e476e-0a99-440e-8d70-9afa3f28468f-kube-api-access-zl4xw\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.349498 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/1dc7eead-6ba1-477d-8815-fd11d101f5b3-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.358928 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dc7eead-6ba1-477d-8815-fd11d101f5b3-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.359716 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cgw5\" (UniqueName: \"kubernetes.io/projected/1dc7eead-6ba1-477d-8815-fd11d101f5b3-kube-api-access-2cgw5\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.362238 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.393664 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.400367 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-84600639-14e1-493d-829d-613c8dd4bc2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-84600639-14e1-493d-829d-613c8dd4bc2f\") pod \"ovsdbserver-sb-2\" (UID: \"a31e476e-0a99-440e-8d70-9afa3f28468f\") " pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401031 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401229 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ad2ca2-b985-410f-9287-9b78b4d41318-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401283 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/34d8c35b-505d-4465-ade8-b8a7dbf8474c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401304 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/34d8c35b-505d-4465-ade8-b8a7dbf8474c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401338 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a1a2e05-b136-4e44-b95c-69203b8430f0-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401360 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/69ad2ca2-b985-410f-9287-9b78b4d41318-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401423 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a1a2e05-b136-4e44-b95c-69203b8430f0-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401529 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwlsp\" (UniqueName: \"kubernetes.io/projected/3a1a2e05-b136-4e44-b95c-69203b8430f0-kube-api-access-zwlsp\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401588 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/34d8c35b-505d-4465-ade8-b8a7dbf8474c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401625 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ad2ca2-b985-410f-9287-9b78b4d41318-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401658 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-e720366b-3c6b-4bff-b294-af10b253a827\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e720366b-3c6b-4bff-b294-af10b253a827\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401682 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a1a2e05-b136-4e44-b95c-69203b8430f0-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401728 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69ad2ca2-b985-410f-9287-9b78b4d41318-config\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401744 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d8c35b-505d-4465-ade8-b8a7dbf8474c-config\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401777 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/34d8c35b-505d-4465-ade8-b8a7dbf8474c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401793 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a1a2e05-b136-4e44-b95c-69203b8430f0-config\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401815 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-1aaea677-6030-44cb-9b4f-a9837193ed2a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1aaea677-6030-44cb-9b4f-a9837193ed2a\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401852 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a1a2e05-b136-4e44-b95c-69203b8430f0-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401924 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlqhz\" (UniqueName: \"kubernetes.io/projected/34d8c35b-505d-4465-ade8-b8a7dbf8474c-kube-api-access-qlqhz\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401958 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9snd7\" (UniqueName: \"kubernetes.io/projected/69ad2ca2-b985-410f-9287-9b78b4d41318-kube-api-access-9snd7\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.401985 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34d8c35b-505d-4465-ade8-b8a7dbf8474c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.402016 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3a1a2e05-b136-4e44-b95c-69203b8430f0-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.402041 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ad2ca2-b985-410f-9287-9b78b4d41318-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.402262 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: 
\"kubernetes.io/empty-dir/69ad2ca2-b985-410f-9287-9b78b4d41318-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.402328 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-aa1c8732-fe5e-468c-ae02-48c82870d961\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa1c8732-fe5e-468c-ae02-48c82870d961\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.417200 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-e798c536-e864-4764-bfe5-219d0712eac8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e798c536-e864-4764-bfe5-219d0712eac8\") pod \"ovsdbserver-sb-1\" (UID: \"bd08d3e4-3584-48f7-81bb-5e68d8dbee06\") " pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.417573 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-385350b4-b402-45d6-bf3a-b286f1b9ecfc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-385350b4-b402-45d6-bf3a-b286f1b9ecfc\") pod \"ovsdbserver-sb-0\" (UID: \"1dc7eead-6ba1-477d-8815-fd11d101f5b3\") " pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.503947 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlqhz\" (UniqueName: \"kubernetes.io/projected/34d8c35b-505d-4465-ade8-b8a7dbf8474c-kube-api-access-qlqhz\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.503993 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9snd7\" (UniqueName: \"kubernetes.io/projected/69ad2ca2-b985-410f-9287-9b78b4d41318-kube-api-access-9snd7\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504019 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34d8c35b-505d-4465-ade8-b8a7dbf8474c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504045 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3a1a2e05-b136-4e44-b95c-69203b8430f0-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504079 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ad2ca2-b985-410f-9287-9b78b4d41318-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504104 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/69ad2ca2-b985-410f-9287-9b78b4d41318-ovsdb-rundir\") pod 
\"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504130 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-aa1c8732-fe5e-468c-ae02-48c82870d961\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa1c8732-fe5e-468c-ae02-48c82870d961\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504183 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ad2ca2-b985-410f-9287-9b78b4d41318-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504228 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/34d8c35b-505d-4465-ade8-b8a7dbf8474c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504248 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/34d8c35b-505d-4465-ade8-b8a7dbf8474c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504268 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a1a2e05-b136-4e44-b95c-69203b8430f0-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504286 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/69ad2ca2-b985-410f-9287-9b78b4d41318-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504329 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a1a2e05-b136-4e44-b95c-69203b8430f0-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504350 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwlsp\" (UniqueName: \"kubernetes.io/projected/3a1a2e05-b136-4e44-b95c-69203b8430f0-kube-api-access-zwlsp\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504381 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/34d8c35b-505d-4465-ade8-b8a7dbf8474c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504398 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ad2ca2-b985-410f-9287-9b78b4d41318-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504426 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-e720366b-3c6b-4bff-b294-af10b253a827\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e720366b-3c6b-4bff-b294-af10b253a827\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504447 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a1a2e05-b136-4e44-b95c-69203b8430f0-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504477 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d8c35b-505d-4465-ade8-b8a7dbf8474c-config\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504499 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69ad2ca2-b985-410f-9287-9b78b4d41318-config\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504527 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/34d8c35b-505d-4465-ade8-b8a7dbf8474c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504555 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a1a2e05-b136-4e44-b95c-69203b8430f0-config\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504579 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-1aaea677-6030-44cb-9b4f-a9837193ed2a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1aaea677-6030-44cb-9b4f-a9837193ed2a\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504611 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a1a2e05-b136-4e44-b95c-69203b8430f0-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504745 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/3a1a2e05-b136-4e44-b95c-69203b8430f0-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: 
\"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.504815 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/34d8c35b-505d-4465-ade8-b8a7dbf8474c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.505670 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/34d8c35b-505d-4465-ade8-b8a7dbf8474c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.505915 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/69ad2ca2-b985-410f-9287-9b78b4d41318-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.505938 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3a1a2e05-b136-4e44-b95c-69203b8430f0-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.506601 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/34d8c35b-505d-4465-ade8-b8a7dbf8474c-config\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.507504 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/69ad2ca2-b985-410f-9287-9b78b4d41318-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.508128 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a1a2e05-b136-4e44-b95c-69203b8430f0-config\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.508734 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69ad2ca2-b985-410f-9287-9b78b4d41318-config\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.509259 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ad2ca2-b985-410f-9287-9b78b4d41318-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.509265 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/34d8c35b-505d-4465-ade8-b8a7dbf8474c-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " 
pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.512138 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a1a2e05-b136-4e44-b95c-69203b8430f0-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.516090 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ad2ca2-b985-410f-9287-9b78b4d41318-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.517596 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a1a2e05-b136-4e44-b95c-69203b8430f0-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.521215 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3a1a2e05-b136-4e44-b95c-69203b8430f0-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.521764 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.521809 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-e720366b-3c6b-4bff-b294-af10b253a827\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e720366b-3c6b-4bff-b294-af10b253a827\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/8ea7b350259417eb86a584c9d58c0ecf400bf5d3d80b9414dadd06a9252c3dc7/globalmount\"" pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.522722 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/34d8c35b-505d-4465-ade8-b8a7dbf8474c-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.523810 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34d8c35b-505d-4465-ade8-b8a7dbf8474c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.525806 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlqhz\" (UniqueName: \"kubernetes.io/projected/34d8c35b-505d-4465-ade8-b8a7dbf8474c-kube-api-access-qlqhz\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.526292 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9snd7\" 
(UniqueName: \"kubernetes.io/projected/69ad2ca2-b985-410f-9287-9b78b4d41318-kube-api-access-9snd7\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.526438 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.526475 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-aa1c8732-fe5e-468c-ae02-48c82870d961\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa1c8732-fe5e-468c-ae02-48c82870d961\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ac20eda29bf8943be1f0015486ef08b614e5ce55f65d7c2779fc8d3c70e37b4f/globalmount\"" pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.526567 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.526604 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-1aaea677-6030-44cb-9b4f-a9837193ed2a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1aaea677-6030-44cb-9b4f-a9837193ed2a\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f669d4b6c33e18a1824ae29ea62cf38f1c25bac9c9a647cd7c58f7588b35bb73/globalmount\"" pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.535036 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ad2ca2-b985-410f-9287-9b78b4d41318-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.536164 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwlsp\" (UniqueName: \"kubernetes.io/projected/3a1a2e05-b136-4e44-b95c-69203b8430f0-kube-api-access-zwlsp\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.562266 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-aa1c8732-fe5e-468c-ae02-48c82870d961\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aa1c8732-fe5e-468c-ae02-48c82870d961\") pod \"ovsdbserver-nb-2\" (UID: \"69ad2ca2-b985-410f-9287-9b78b4d41318\") " pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.564367 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-1aaea677-6030-44cb-9b4f-a9837193ed2a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1aaea677-6030-44cb-9b4f-a9837193ed2a\") pod \"ovsdbserver-nb-1\" (UID: \"3a1a2e05-b136-4e44-b95c-69203b8430f0\") " pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.568891 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-e720366b-3c6b-4bff-b294-af10b253a827\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e720366b-3c6b-4bff-b294-af10b253a827\") pod \"ovsdbserver-nb-0\" (UID: \"34d8c35b-505d-4465-ade8-b8a7dbf8474c\") " pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.597440 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.667025 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.692042 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.711739 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.744089 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:14 crc kubenswrapper[4932]: I1125 10:19:14.751752 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:15 crc kubenswrapper[4932]: I1125 10:19:15.035299 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 10:19:15 crc kubenswrapper[4932]: I1125 10:19:15.157971 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1dc7eead-6ba1-477d-8815-fd11d101f5b3","Type":"ContainerStarted","Data":"72660037ffd051fde278e5955119fefb00eb69cec31d98a80cd798a7d6f8deeb"} Nov 25 10:19:15 crc kubenswrapper[4932]: I1125 10:19:15.165018 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 25 10:19:15 crc kubenswrapper[4932]: W1125 10:19:15.171597 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34d8c35b_505d_4465_ade8_b8a7dbf8474c.slice/crio-3be393c5f46519395218426d13174ca1ce8682895155abb9d03395ecb071fc23 WatchSource:0}: Error finding container 3be393c5f46519395218426d13174ca1ce8682895155abb9d03395ecb071fc23: Status 404 returned error can't find the container with id 3be393c5f46519395218426d13174ca1ce8682895155abb9d03395ecb071fc23 Nov 25 10:19:15 crc kubenswrapper[4932]: I1125 10:19:15.348884 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 25 10:19:15 crc kubenswrapper[4932]: W1125 10:19:15.388447 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd08d3e4_3584_48f7_81bb_5e68d8dbee06.slice/crio-333dbf34b50ccc7781dbef098d4f99d53538fd2be618cc4f301e9fc7a81e1c51 WatchSource:0}: Error finding container 333dbf34b50ccc7781dbef098d4f99d53538fd2be618cc4f301e9fc7a81e1c51: Status 404 returned error can't find the container with id 333dbf34b50ccc7781dbef098d4f99d53538fd2be618cc4f301e9fc7a81e1c51 Nov 25 10:19:15 crc kubenswrapper[4932]: I1125 10:19:15.475325 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.119225 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 25 10:19:16 crc kubenswrapper[4932]: W1125 10:19:16.125267 4932 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda31e476e_0a99_440e_8d70_9afa3f28468f.slice/crio-f98dcd62cc1866ecf81dcf0cd26e82db0d77c0eab82b96956272b42ee5dc25e4 WatchSource:0}: Error finding container f98dcd62cc1866ecf81dcf0cd26e82db0d77c0eab82b96956272b42ee5dc25e4: Status 404 returned error can't find the container with id f98dcd62cc1866ecf81dcf0cd26e82db0d77c0eab82b96956272b42ee5dc25e4 Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.166001 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"69ad2ca2-b985-410f-9287-9b78b4d41318","Type":"ContainerStarted","Data":"2b6b3d9023bbc0566bb62f5c9a4506840a7f6d374351fe409a846275c4c13604"} Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.166043 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"69ad2ca2-b985-410f-9287-9b78b4d41318","Type":"ContainerStarted","Data":"69ae89abf16103d8c87b529c66cdc5d7edf951938d17629b68934511e3447254"} Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.166054 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"69ad2ca2-b985-410f-9287-9b78b4d41318","Type":"ContainerStarted","Data":"6d0109a71ce2f11c8e651caca218cc29cae5e72036fa2f76a52a50c527e1d1f1"} Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.167866 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"a31e476e-0a99-440e-8d70-9afa3f28468f","Type":"ContainerStarted","Data":"f98dcd62cc1866ecf81dcf0cd26e82db0d77c0eab82b96956272b42ee5dc25e4"} Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.170112 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"34d8c35b-505d-4465-ade8-b8a7dbf8474c","Type":"ContainerStarted","Data":"cafb8097368be8bdd6f6df97ca97f6c333468a3ab9d081f3e5a0077817d72df6"} Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.170145 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"34d8c35b-505d-4465-ade8-b8a7dbf8474c","Type":"ContainerStarted","Data":"2fa8f8cede47c76ccf4b4da09b1442be3fb4e13e65ce21d4594c042f84b21aa2"} Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.170155 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"34d8c35b-505d-4465-ade8-b8a7dbf8474c","Type":"ContainerStarted","Data":"3be393c5f46519395218426d13174ca1ce8682895155abb9d03395ecb071fc23"} Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.173441 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1dc7eead-6ba1-477d-8815-fd11d101f5b3","Type":"ContainerStarted","Data":"663b7d46b495d4c2bee0b6b00eadf309891b2e1b99ea88200dce8757edcf5f35"} Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.173489 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"1dc7eead-6ba1-477d-8815-fd11d101f5b3","Type":"ContainerStarted","Data":"4c5ff60b56138bc1fe9b0426e2ffd4d0445dc009fb60db66340fb1b46003cda6"} Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.179917 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"bd08d3e4-3584-48f7-81bb-5e68d8dbee06","Type":"ContainerStarted","Data":"8758a040d75eaee88b96d21b122406f1b5a1970f2cbb8f16982dc8355cf76526"} Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.179963 4932 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"bd08d3e4-3584-48f7-81bb-5e68d8dbee06","Type":"ContainerStarted","Data":"6212e9ead00e8268b82867828559cfc575d92c6ebd5c98037094691a9f6a05b7"} Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.179973 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"bd08d3e4-3584-48f7-81bb-5e68d8dbee06","Type":"ContainerStarted","Data":"333dbf34b50ccc7781dbef098d4f99d53538fd2be618cc4f301e9fc7a81e1c51"} Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.190864 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-2" podStartSLOduration=3.190844146 podStartE2EDuration="3.190844146s" podCreationTimestamp="2025-11-25 10:19:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:19:16.183226098 +0000 UTC m=+5416.309255661" watchObservedRunningTime="2025-11-25 10:19:16.190844146 +0000 UTC m=+5416.316873709" Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.212873 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" podStartSLOduration=3.212852557 podStartE2EDuration="3.212852557s" podCreationTimestamp="2025-11-25 10:19:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:19:16.202743307 +0000 UTC m=+5416.328772870" watchObservedRunningTime="2025-11-25 10:19:16.212852557 +0000 UTC m=+5416.338882120" Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.237813 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=3.237793621 podStartE2EDuration="3.237793621s" podCreationTimestamp="2025-11-25 10:19:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:19:16.235807745 +0000 UTC m=+5416.361837318" watchObservedRunningTime="2025-11-25 10:19:16.237793621 +0000 UTC m=+5416.363823184" Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.273424 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=3.273403132 podStartE2EDuration="3.273403132s" podCreationTimestamp="2025-11-25 10:19:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:19:16.268779689 +0000 UTC m=+5416.394809272" watchObservedRunningTime="2025-11-25 10:19:16.273403132 +0000 UTC m=+5416.399432695" Nov 25 10:19:16 crc kubenswrapper[4932]: I1125 10:19:16.528295 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.194749 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"3a1a2e05-b136-4e44-b95c-69203b8430f0","Type":"ContainerStarted","Data":"0c45c179c5502d6a103de756f376fe3683fc54ab860727567d0ade7c40966b85"} Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.195125 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"3a1a2e05-b136-4e44-b95c-69203b8430f0","Type":"ContainerStarted","Data":"07d0caa1dd9a98fc77743e0b0d9f9a9c04a76a4fc67abe5062eaafdef25af040"} Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.195148 4932 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"3a1a2e05-b136-4e44-b95c-69203b8430f0","Type":"ContainerStarted","Data":"af878e55e51b5cf9bcbd77cf5cc1cd5434ee118d5ff9776bfa15648e7f6fe6b0"} Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.198561 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"a31e476e-0a99-440e-8d70-9afa3f28468f","Type":"ContainerStarted","Data":"86c03e47f80497b41b5c2442d24580033180074186784586dac643c7014de974"} Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.198627 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"a31e476e-0a99-440e-8d70-9afa3f28468f","Type":"ContainerStarted","Data":"5537e08038705c66a7e1bc31ad79fb44041eb75065cb26bdd63ccc7e8b8b5c3a"} Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.227768 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=4.227738648 podStartE2EDuration="4.227738648s" podCreationTimestamp="2025-11-25 10:19:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:19:17.222810727 +0000 UTC m=+5417.348840290" watchObservedRunningTime="2025-11-25 10:19:17.227738648 +0000 UTC m=+5417.353768251" Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.244988 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=4.244964302 podStartE2EDuration="4.244964302s" podCreationTimestamp="2025-11-25 10:19:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:19:17.240212426 +0000 UTC m=+5417.366241989" watchObservedRunningTime="2025-11-25 10:19:17.244964302 +0000 UTC m=+5417.370993855" Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.599058 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.667305 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.693330 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.712514 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.745935 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:17 crc kubenswrapper[4932]: I1125 10:19:17.754507 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:19 crc kubenswrapper[4932]: I1125 10:19:19.598830 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:19 crc kubenswrapper[4932]: I1125 10:19:19.667374 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:19 crc kubenswrapper[4932]: I1125 10:19:19.693926 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:19 crc kubenswrapper[4932]: I1125 10:19:19.712338 4932 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:19 crc kubenswrapper[4932]: I1125 10:19:19.745933 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:19 crc kubenswrapper[4932]: I1125 10:19:19.754407 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:20 crc kubenswrapper[4932]: I1125 10:19:20.645371 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:20 crc kubenswrapper[4932]: I1125 10:19:20.691992 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 25 10:19:20 crc kubenswrapper[4932]: I1125 10:19:20.732796 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:20 crc kubenswrapper[4932]: I1125 10:19:20.743892 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:20 crc kubenswrapper[4932]: I1125 10:19:20.755827 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:20 crc kubenswrapper[4932]: I1125 10:19:20.781981 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 25 10:19:20 crc kubenswrapper[4932]: I1125 10:19:20.797652 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1" Nov 25 10:19:20 crc kubenswrapper[4932]: I1125 10:19:20.814471 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:20 crc kubenswrapper[4932]: I1125 10:19:20.831161 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:20 crc kubenswrapper[4932]: I1125 10:19:20.933587 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Nov 25 10:19:20 crc kubenswrapper[4932]: I1125 10:19:20.966134 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-647ff7f659-r9btz"] Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.002161 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-647ff7f659-r9btz"] Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.002337 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.016363 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.139213 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-ovsdbserver-nb\") pod \"dnsmasq-dns-647ff7f659-r9btz\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.139268 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45rzp\" (UniqueName: \"kubernetes.io/projected/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-kube-api-access-45rzp\") pod \"dnsmasq-dns-647ff7f659-r9btz\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.139294 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-dns-svc\") pod \"dnsmasq-dns-647ff7f659-r9btz\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.139376 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-config\") pod \"dnsmasq-dns-647ff7f659-r9btz\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.242882 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-config\") pod \"dnsmasq-dns-647ff7f659-r9btz\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.243003 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-ovsdbserver-nb\") pod \"dnsmasq-dns-647ff7f659-r9btz\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.243034 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45rzp\" (UniqueName: \"kubernetes.io/projected/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-kube-api-access-45rzp\") pod \"dnsmasq-dns-647ff7f659-r9btz\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.243052 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-dns-svc\") pod \"dnsmasq-dns-647ff7f659-r9btz\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.243903 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-config\") pod \"dnsmasq-dns-647ff7f659-r9btz\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.248798 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-dns-svc\") pod \"dnsmasq-dns-647ff7f659-r9btz\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.249345 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-ovsdbserver-nb\") pod \"dnsmasq-dns-647ff7f659-r9btz\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.275916 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45rzp\" (UniqueName: \"kubernetes.io/projected/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-kube-api-access-45rzp\") pod \"dnsmasq-dns-647ff7f659-r9btz\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.296616 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-647ff7f659-r9btz"] Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.297608 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.326583 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-1" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.343666 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c98db745c-7wlng"] Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.347520 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.348448 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.355990 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.396881 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c98db745c-7wlng"] Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.448317 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vn5rg\" (UniqueName: \"kubernetes.io/projected/827e62b6-8653-4c35-8627-c3055eac1032-kube-api-access-vn5rg\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.448378 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-dns-svc\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.448414 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-ovsdbserver-nb\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.448461 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-config\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.448519 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-ovsdbserver-sb\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.550292 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-config\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.550392 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-ovsdbserver-sb\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.550448 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vn5rg\" (UniqueName: 
\"kubernetes.io/projected/827e62b6-8653-4c35-8627-c3055eac1032-kube-api-access-vn5rg\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.550469 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-dns-svc\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.550489 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-ovsdbserver-nb\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.551173 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-config\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.551261 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-ovsdbserver-nb\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.551791 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-ovsdbserver-sb\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.551915 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-dns-svc\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.574471 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vn5rg\" (UniqueName: \"kubernetes.io/projected/827e62b6-8653-4c35-8627-c3055eac1032-kube-api-access-vn5rg\") pod \"dnsmasq-dns-6c98db745c-7wlng\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.734844 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:21 crc kubenswrapper[4932]: I1125 10:19:21.861304 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-647ff7f659-r9btz"] Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.013801 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c98db745c-7wlng"] Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.238513 4932 generic.go:334] "Generic (PLEG): container finished" podID="f0b6518d-2abf-4a27-82ea-4c2f464a0e83" containerID="de2717de1c638508737c46ca361f5e347d97e32e13e01efcea4efc988e544683" exitCode=0 Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.238566 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647ff7f659-r9btz" event={"ID":"f0b6518d-2abf-4a27-82ea-4c2f464a0e83","Type":"ContainerDied","Data":"de2717de1c638508737c46ca361f5e347d97e32e13e01efcea4efc988e544683"} Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.238948 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647ff7f659-r9btz" event={"ID":"f0b6518d-2abf-4a27-82ea-4c2f464a0e83","Type":"ContainerStarted","Data":"752f1c7c403bfb9a15fe1ffbfe9d70a81336abe84c10f4b1e4ceac9ddb701e74"} Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.240223 4932 generic.go:334] "Generic (PLEG): container finished" podID="827e62b6-8653-4c35-8627-c3055eac1032" containerID="1e3edaec830eb8ef93ad4f74da08142e55df6c6ca49fb644451029c001bc14fc" exitCode=0 Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.240687 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" event={"ID":"827e62b6-8653-4c35-8627-c3055eac1032","Type":"ContainerDied","Data":"1e3edaec830eb8ef93ad4f74da08142e55df6c6ca49fb644451029c001bc14fc"} Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.240722 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" event={"ID":"827e62b6-8653-4c35-8627-c3055eac1032","Type":"ContainerStarted","Data":"d25fad8687b4ed2401a4cba74cc57b22b84b25a69bb6d5c2fac79dbccb7efb80"} Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.580856 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.677531 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-dns-svc\") pod \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.677594 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45rzp\" (UniqueName: \"kubernetes.io/projected/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-kube-api-access-45rzp\") pod \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.677657 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-config\") pod \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.677755 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-ovsdbserver-nb\") pod \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\" (UID: \"f0b6518d-2abf-4a27-82ea-4c2f464a0e83\") " Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.682846 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-kube-api-access-45rzp" (OuterVolumeSpecName: "kube-api-access-45rzp") pod "f0b6518d-2abf-4a27-82ea-4c2f464a0e83" (UID: "f0b6518d-2abf-4a27-82ea-4c2f464a0e83"). InnerVolumeSpecName "kube-api-access-45rzp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.699946 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f0b6518d-2abf-4a27-82ea-4c2f464a0e83" (UID: "f0b6518d-2abf-4a27-82ea-4c2f464a0e83"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.699945 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-config" (OuterVolumeSpecName: "config") pod "f0b6518d-2abf-4a27-82ea-4c2f464a0e83" (UID: "f0b6518d-2abf-4a27-82ea-4c2f464a0e83"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.704682 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f0b6518d-2abf-4a27-82ea-4c2f464a0e83" (UID: "f0b6518d-2abf-4a27-82ea-4c2f464a0e83"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.779766 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.779836 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.779846 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45rzp\" (UniqueName: \"kubernetes.io/projected/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-kube-api-access-45rzp\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:22 crc kubenswrapper[4932]: I1125 10:19:22.779855 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0b6518d-2abf-4a27-82ea-4c2f464a0e83-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.168352 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qx4rr"] Nov 25 10:19:23 crc kubenswrapper[4932]: E1125 10:19:23.168958 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0b6518d-2abf-4a27-82ea-4c2f464a0e83" containerName="init" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.168992 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0b6518d-2abf-4a27-82ea-4c2f464a0e83" containerName="init" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.169335 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0b6518d-2abf-4a27-82ea-4c2f464a0e83" containerName="init" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.171426 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.187079 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qx4rr"] Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.249562 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647ff7f659-r9btz" event={"ID":"f0b6518d-2abf-4a27-82ea-4c2f464a0e83","Type":"ContainerDied","Data":"752f1c7c403bfb9a15fe1ffbfe9d70a81336abe84c10f4b1e4ceac9ddb701e74"} Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.249611 4932 scope.go:117] "RemoveContainer" containerID="de2717de1c638508737c46ca361f5e347d97e32e13e01efcea4efc988e544683" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.249935 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-647ff7f659-r9btz" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.251851 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" event={"ID":"827e62b6-8653-4c35-8627-c3055eac1032","Type":"ContainerStarted","Data":"260876b905360bb9f15ab852b3df65659e790b27e81e897e00b3ab107240bc15"} Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.252032 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.275122 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" podStartSLOduration=2.275087648 podStartE2EDuration="2.275087648s" podCreationTimestamp="2025-11-25 10:19:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:19:23.270096125 +0000 UTC m=+5423.396125698" watchObservedRunningTime="2025-11-25 10:19:23.275087648 +0000 UTC m=+5423.401117211" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.288607 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76ee4ddc-6cac-4103-b87f-b951619bb038-catalog-content\") pod \"redhat-operators-qx4rr\" (UID: \"76ee4ddc-6cac-4103-b87f-b951619bb038\") " pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.288711 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76ee4ddc-6cac-4103-b87f-b951619bb038-utilities\") pod \"redhat-operators-qx4rr\" (UID: \"76ee4ddc-6cac-4103-b87f-b951619bb038\") " pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.288785 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rb5nt\" (UniqueName: \"kubernetes.io/projected/76ee4ddc-6cac-4103-b87f-b951619bb038-kube-api-access-rb5nt\") pod \"redhat-operators-qx4rr\" (UID: \"76ee4ddc-6cac-4103-b87f-b951619bb038\") " pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.317908 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-647ff7f659-r9btz"] Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.324736 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-647ff7f659-r9btz"] Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.389948 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76ee4ddc-6cac-4103-b87f-b951619bb038-utilities\") pod \"redhat-operators-qx4rr\" (UID: \"76ee4ddc-6cac-4103-b87f-b951619bb038\") " pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.390042 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rb5nt\" (UniqueName: \"kubernetes.io/projected/76ee4ddc-6cac-4103-b87f-b951619bb038-kube-api-access-rb5nt\") pod \"redhat-operators-qx4rr\" (UID: \"76ee4ddc-6cac-4103-b87f-b951619bb038\") " pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.390540 4932 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76ee4ddc-6cac-4103-b87f-b951619bb038-utilities\") pod \"redhat-operators-qx4rr\" (UID: \"76ee4ddc-6cac-4103-b87f-b951619bb038\") " pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.390768 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76ee4ddc-6cac-4103-b87f-b951619bb038-catalog-content\") pod \"redhat-operators-qx4rr\" (UID: \"76ee4ddc-6cac-4103-b87f-b951619bb038\") " pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.391366 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76ee4ddc-6cac-4103-b87f-b951619bb038-catalog-content\") pod \"redhat-operators-qx4rr\" (UID: \"76ee4ddc-6cac-4103-b87f-b951619bb038\") " pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.408622 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rb5nt\" (UniqueName: \"kubernetes.io/projected/76ee4ddc-6cac-4103-b87f-b951619bb038-kube-api-access-rb5nt\") pod \"redhat-operators-qx4rr\" (UID: \"76ee4ddc-6cac-4103-b87f-b951619bb038\") " pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.496993 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:23 crc kubenswrapper[4932]: I1125 10:19:23.961570 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qx4rr"] Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.267170 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qx4rr" event={"ID":"76ee4ddc-6cac-4103-b87f-b951619bb038","Type":"ContainerStarted","Data":"918ba3177aa023c28e6ea19d8904f7f3566dc60aa81a0eb774b7b8d81f01e0cd"} Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.267241 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qx4rr" event={"ID":"76ee4ddc-6cac-4103-b87f-b951619bb038","Type":"ContainerStarted","Data":"3b7aa98fd0ebae91ae9ceb38c1b12e4019adb3afe1f9c756e48184b051c18aa9"} Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.392730 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-copy-data"] Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.394012 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.397421 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.398111 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.510673 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f4q6\" (UniqueName: \"kubernetes.io/projected/544b5f86-62b3-4c9b-bc0c-b6d2c2f76828-kube-api-access-2f4q6\") pod \"ovn-copy-data\" (UID: \"544b5f86-62b3-4c9b-bc0c-b6d2c2f76828\") " pod="openstack/ovn-copy-data" Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.512974 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/544b5f86-62b3-4c9b-bc0c-b6d2c2f76828-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"544b5f86-62b3-4c9b-bc0c-b6d2c2f76828\") " pod="openstack/ovn-copy-data" Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.513458 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-9ad9c443-d22b-4075-9b37-7789f23d32b2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9ad9c443-d22b-4075-9b37-7789f23d32b2\") pod \"ovn-copy-data\" (UID: \"544b5f86-62b3-4c9b-bc0c-b6d2c2f76828\") " pod="openstack/ovn-copy-data" Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.614907 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f4q6\" (UniqueName: \"kubernetes.io/projected/544b5f86-62b3-4c9b-bc0c-b6d2c2f76828-kube-api-access-2f4q6\") pod \"ovn-copy-data\" (UID: \"544b5f86-62b3-4c9b-bc0c-b6d2c2f76828\") " pod="openstack/ovn-copy-data" Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.615301 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/544b5f86-62b3-4c9b-bc0c-b6d2c2f76828-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"544b5f86-62b3-4c9b-bc0c-b6d2c2f76828\") " pod="openstack/ovn-copy-data" Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.615380 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-9ad9c443-d22b-4075-9b37-7789f23d32b2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9ad9c443-d22b-4075-9b37-7789f23d32b2\") pod \"ovn-copy-data\" (UID: \"544b5f86-62b3-4c9b-bc0c-b6d2c2f76828\") " pod="openstack/ovn-copy-data" Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.619069 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0b6518d-2abf-4a27-82ea-4c2f464a0e83" path="/var/lib/kubelet/pods/f0b6518d-2abf-4a27-82ea-4c2f464a0e83/volumes" Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.624988 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.625210 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-9ad9c443-d22b-4075-9b37-7789f23d32b2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9ad9c443-d22b-4075-9b37-7789f23d32b2\") pod \"ovn-copy-data\" (UID: \"544b5f86-62b3-4c9b-bc0c-b6d2c2f76828\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/c7c004c286e6c2979995b06762387efce2c8f73e73d59da34427b0521e0d0b34/globalmount\"" pod="openstack/ovn-copy-data" Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.625511 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/544b5f86-62b3-4c9b-bc0c-b6d2c2f76828-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"544b5f86-62b3-4c9b-bc0c-b6d2c2f76828\") " pod="openstack/ovn-copy-data" Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.652499 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f4q6\" (UniqueName: \"kubernetes.io/projected/544b5f86-62b3-4c9b-bc0c-b6d2c2f76828-kube-api-access-2f4q6\") pod \"ovn-copy-data\" (UID: \"544b5f86-62b3-4c9b-bc0c-b6d2c2f76828\") " pod="openstack/ovn-copy-data" Nov 25 10:19:24 crc kubenswrapper[4932]: I1125 10:19:24.733754 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-9ad9c443-d22b-4075-9b37-7789f23d32b2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9ad9c443-d22b-4075-9b37-7789f23d32b2\") pod \"ovn-copy-data\" (UID: \"544b5f86-62b3-4c9b-bc0c-b6d2c2f76828\") " pod="openstack/ovn-copy-data" Nov 25 10:19:25 crc kubenswrapper[4932]: I1125 10:19:25.016528 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Nov 25 10:19:25 crc kubenswrapper[4932]: I1125 10:19:25.281642 4932 generic.go:334] "Generic (PLEG): container finished" podID="76ee4ddc-6cac-4103-b87f-b951619bb038" containerID="918ba3177aa023c28e6ea19d8904f7f3566dc60aa81a0eb774b7b8d81f01e0cd" exitCode=0 Nov 25 10:19:25 crc kubenswrapper[4932]: I1125 10:19:25.282158 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qx4rr" event={"ID":"76ee4ddc-6cac-4103-b87f-b951619bb038","Type":"ContainerDied","Data":"918ba3177aa023c28e6ea19d8904f7f3566dc60aa81a0eb774b7b8d81f01e0cd"} Nov 25 10:19:25 crc kubenswrapper[4932]: I1125 10:19:25.284836 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:19:25 crc kubenswrapper[4932]: I1125 10:19:25.560686 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Nov 25 10:19:26 crc kubenswrapper[4932]: I1125 10:19:26.290016 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"544b5f86-62b3-4c9b-bc0c-b6d2c2f76828","Type":"ContainerStarted","Data":"669d6f25d69e576390ea69f21a3c8d7ede430660d0166e7c13d83b1b51bb712e"} Nov 25 10:19:26 crc kubenswrapper[4932]: I1125 10:19:26.292161 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qx4rr" event={"ID":"76ee4ddc-6cac-4103-b87f-b951619bb038","Type":"ContainerStarted","Data":"a193db59e009595e31997a0599610cb10d84d247bed841bbd36a566b447efa70"} Nov 25 10:19:27 crc kubenswrapper[4932]: I1125 10:19:27.299735 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"544b5f86-62b3-4c9b-bc0c-b6d2c2f76828","Type":"ContainerStarted","Data":"55f1eae868f39e8b4e94335679bc4964049c96e4383e5623128354f9448da6c1"} Nov 25 10:19:27 crc kubenswrapper[4932]: I1125 10:19:27.301354 4932 generic.go:334] "Generic (PLEG): container finished" podID="76ee4ddc-6cac-4103-b87f-b951619bb038" containerID="a193db59e009595e31997a0599610cb10d84d247bed841bbd36a566b447efa70" exitCode=0 Nov 25 10:19:27 crc kubenswrapper[4932]: I1125 10:19:27.301392 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qx4rr" event={"ID":"76ee4ddc-6cac-4103-b87f-b951619bb038","Type":"ContainerDied","Data":"a193db59e009595e31997a0599610cb10d84d247bed841bbd36a566b447efa70"} Nov 25 10:19:27 crc kubenswrapper[4932]: I1125 10:19:27.314747 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=3.561161992 podStartE2EDuration="4.314731415s" podCreationTimestamp="2025-11-25 10:19:23 +0000 UTC" firstStartedPulling="2025-11-25 10:19:25.5658397 +0000 UTC m=+5425.691869263" lastFinishedPulling="2025-11-25 10:19:26.319409123 +0000 UTC m=+5426.445438686" observedRunningTime="2025-11-25 10:19:27.314051196 +0000 UTC m=+5427.440080769" watchObservedRunningTime="2025-11-25 10:19:27.314731415 +0000 UTC m=+5427.440760978" Nov 25 10:19:28 crc kubenswrapper[4932]: I1125 10:19:28.311829 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qx4rr" event={"ID":"76ee4ddc-6cac-4103-b87f-b951619bb038","Type":"ContainerStarted","Data":"636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a"} Nov 25 10:19:28 crc kubenswrapper[4932]: I1125 10:19:28.335672 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qx4rr" 
podStartSLOduration=2.900644603 podStartE2EDuration="5.335651599s" podCreationTimestamp="2025-11-25 10:19:23 +0000 UTC" firstStartedPulling="2025-11-25 10:19:25.28458024 +0000 UTC m=+5425.410609813" lastFinishedPulling="2025-11-25 10:19:27.719587246 +0000 UTC m=+5427.845616809" observedRunningTime="2025-11-25 10:19:28.330556463 +0000 UTC m=+5428.456586016" watchObservedRunningTime="2025-11-25 10:19:28.335651599 +0000 UTC m=+5428.461681162" Nov 25 10:19:31 crc kubenswrapper[4932]: I1125 10:19:31.736417 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:19:31 crc kubenswrapper[4932]: I1125 10:19:31.788283 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54564445dc-vjls5"] Nov 25 10:19:31 crc kubenswrapper[4932]: I1125 10:19:31.788495 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54564445dc-vjls5" podUID="cce306ed-cc41-4903-ad78-edc818f19fdc" containerName="dnsmasq-dns" containerID="cri-o://8bdbec87ec2659240016debd2e622b873371c12192fa78f97f9d4b28b7481223" gracePeriod=10 Nov 25 10:19:32 crc kubenswrapper[4932]: I1125 10:19:32.348439 4932 generic.go:334] "Generic (PLEG): container finished" podID="cce306ed-cc41-4903-ad78-edc818f19fdc" containerID="8bdbec87ec2659240016debd2e622b873371c12192fa78f97f9d4b28b7481223" exitCode=0 Nov 25 10:19:32 crc kubenswrapper[4932]: I1125 10:19:32.348521 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54564445dc-vjls5" event={"ID":"cce306ed-cc41-4903-ad78-edc818f19fdc","Type":"ContainerDied","Data":"8bdbec87ec2659240016debd2e622b873371c12192fa78f97f9d4b28b7481223"} Nov 25 10:19:32 crc kubenswrapper[4932]: I1125 10:19:32.885544 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54564445dc-vjls5" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.082056 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cce306ed-cc41-4903-ad78-edc818f19fdc-config\") pod \"cce306ed-cc41-4903-ad78-edc818f19fdc\" (UID: \"cce306ed-cc41-4903-ad78-edc818f19fdc\") " Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.082176 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xr9sb\" (UniqueName: \"kubernetes.io/projected/cce306ed-cc41-4903-ad78-edc818f19fdc-kube-api-access-xr9sb\") pod \"cce306ed-cc41-4903-ad78-edc818f19fdc\" (UID: \"cce306ed-cc41-4903-ad78-edc818f19fdc\") " Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.082258 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cce306ed-cc41-4903-ad78-edc818f19fdc-dns-svc\") pod \"cce306ed-cc41-4903-ad78-edc818f19fdc\" (UID: \"cce306ed-cc41-4903-ad78-edc818f19fdc\") " Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.087642 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cce306ed-cc41-4903-ad78-edc818f19fdc-kube-api-access-xr9sb" (OuterVolumeSpecName: "kube-api-access-xr9sb") pod "cce306ed-cc41-4903-ad78-edc818f19fdc" (UID: "cce306ed-cc41-4903-ad78-edc818f19fdc"). InnerVolumeSpecName "kube-api-access-xr9sb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.121328 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cce306ed-cc41-4903-ad78-edc818f19fdc-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cce306ed-cc41-4903-ad78-edc818f19fdc" (UID: "cce306ed-cc41-4903-ad78-edc818f19fdc"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.134968 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cce306ed-cc41-4903-ad78-edc818f19fdc-config" (OuterVolumeSpecName: "config") pod "cce306ed-cc41-4903-ad78-edc818f19fdc" (UID: "cce306ed-cc41-4903-ad78-edc818f19fdc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.183839 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cce306ed-cc41-4903-ad78-edc818f19fdc-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.183893 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xr9sb\" (UniqueName: \"kubernetes.io/projected/cce306ed-cc41-4903-ad78-edc818f19fdc-kube-api-access-xr9sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.183912 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cce306ed-cc41-4903-ad78-edc818f19fdc-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.357265 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54564445dc-vjls5" event={"ID":"cce306ed-cc41-4903-ad78-edc818f19fdc","Type":"ContainerDied","Data":"9532310b208e34bffc59166b263eaa55f64969ec7e6cd2e094c2bca2cb8bcf9f"} Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.357340 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54564445dc-vjls5" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.357389 4932 scope.go:117] "RemoveContainer" containerID="8bdbec87ec2659240016debd2e622b873371c12192fa78f97f9d4b28b7481223" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.381059 4932 scope.go:117] "RemoveContainer" containerID="39e91743d461a371594029d395bc50224fadf8be8f11dde72c29f5466fe2256c" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.397550 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54564445dc-vjls5"] Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.411180 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54564445dc-vjls5"] Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.499068 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.499118 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.543958 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.801152 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 25 10:19:33 crc kubenswrapper[4932]: E1125 10:19:33.801622 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cce306ed-cc41-4903-ad78-edc818f19fdc" containerName="init" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.801647 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cce306ed-cc41-4903-ad78-edc818f19fdc" containerName="init" Nov 25 10:19:33 crc kubenswrapper[4932]: E1125 10:19:33.801668 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cce306ed-cc41-4903-ad78-edc818f19fdc" containerName="dnsmasq-dns" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.801676 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cce306ed-cc41-4903-ad78-edc818f19fdc" containerName="dnsmasq-dns" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.801887 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="cce306ed-cc41-4903-ad78-edc818f19fdc" containerName="dnsmasq-dns" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.802878 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.804794 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.804861 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.805473 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-q5brb" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.805650 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.816460 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.996999 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e60db143-3c84-4636-8a3e-dfb065cc22eb-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.997330 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c9ft\" (UniqueName: \"kubernetes.io/projected/e60db143-3c84-4636-8a3e-dfb065cc22eb-kube-api-access-9c9ft\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.997407 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e60db143-3c84-4636-8a3e-dfb065cc22eb-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.997443 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e60db143-3c84-4636-8a3e-dfb065cc22eb-scripts\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.997467 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e60db143-3c84-4636-8a3e-dfb065cc22eb-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.997495 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e60db143-3c84-4636-8a3e-dfb065cc22eb-config\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:33 crc kubenswrapper[4932]: I1125 10:19:33.997526 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e60db143-3c84-4636-8a3e-dfb065cc22eb-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: 
I1125 10:19:34.099224 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e60db143-3c84-4636-8a3e-dfb065cc22eb-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.099284 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e60db143-3c84-4636-8a3e-dfb065cc22eb-scripts\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.099314 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e60db143-3c84-4636-8a3e-dfb065cc22eb-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.099345 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e60db143-3c84-4636-8a3e-dfb065cc22eb-config\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.099381 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e60db143-3c84-4636-8a3e-dfb065cc22eb-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.099424 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e60db143-3c84-4636-8a3e-dfb065cc22eb-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.099442 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c9ft\" (UniqueName: \"kubernetes.io/projected/e60db143-3c84-4636-8a3e-dfb065cc22eb-kube-api-access-9c9ft\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.100485 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e60db143-3c84-4636-8a3e-dfb065cc22eb-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.100841 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e60db143-3c84-4636-8a3e-dfb065cc22eb-config\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.101101 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e60db143-3c84-4636-8a3e-dfb065cc22eb-scripts\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.106000 4932 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e60db143-3c84-4636-8a3e-dfb065cc22eb-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.106287 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e60db143-3c84-4636-8a3e-dfb065cc22eb-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.108320 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e60db143-3c84-4636-8a3e-dfb065cc22eb-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.120668 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c9ft\" (UniqueName: \"kubernetes.io/projected/e60db143-3c84-4636-8a3e-dfb065cc22eb-kube-api-access-9c9ft\") pod \"ovn-northd-0\" (UID: \"e60db143-3c84-4636-8a3e-dfb065cc22eb\") " pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.123854 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.417182 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.463536 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qx4rr"] Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.557049 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 10:19:34 crc kubenswrapper[4932]: W1125 10:19:34.563508 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode60db143_3c84_4636_8a3e_dfb065cc22eb.slice/crio-8d405557c0df44ecd7402b6f6ebc70aab20ebcc5c3a14dbe2168adacd691e774 WatchSource:0}: Error finding container 8d405557c0df44ecd7402b6f6ebc70aab20ebcc5c3a14dbe2168adacd691e774: Status 404 returned error can't find the container with id 8d405557c0df44ecd7402b6f6ebc70aab20ebcc5c3a14dbe2168adacd691e774 Nov 25 10:19:34 crc kubenswrapper[4932]: I1125 10:19:34.627201 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cce306ed-cc41-4903-ad78-edc818f19fdc" path="/var/lib/kubelet/pods/cce306ed-cc41-4903-ad78-edc818f19fdc/volumes" Nov 25 10:19:35 crc kubenswrapper[4932]: I1125 10:19:35.377019 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e60db143-3c84-4636-8a3e-dfb065cc22eb","Type":"ContainerStarted","Data":"19a9332df3eb99c1f8aae4eac53ec7ac4b880b0a91ed8e2716d6d27b0abfdb70"} Nov 25 10:19:35 crc kubenswrapper[4932]: I1125 10:19:35.377078 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e60db143-3c84-4636-8a3e-dfb065cc22eb","Type":"ContainerStarted","Data":"4bc0a2e43c782f6c291ca891642982d55b30e11f9806293a61b0e409dfc964a0"} Nov 25 10:19:35 crc kubenswrapper[4932]: I1125 10:19:35.377096 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-northd-0" event={"ID":"e60db143-3c84-4636-8a3e-dfb065cc22eb","Type":"ContainerStarted","Data":"8d405557c0df44ecd7402b6f6ebc70aab20ebcc5c3a14dbe2168adacd691e774"} Nov 25 10:19:35 crc kubenswrapper[4932]: I1125 10:19:35.408008 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.4079879 podStartE2EDuration="2.4079879s" podCreationTimestamp="2025-11-25 10:19:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:19:35.398364114 +0000 UTC m=+5435.524393687" watchObservedRunningTime="2025-11-25 10:19:35.4079879 +0000 UTC m=+5435.534017473" Nov 25 10:19:36 crc kubenswrapper[4932]: I1125 10:19:36.385394 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qx4rr" podUID="76ee4ddc-6cac-4103-b87f-b951619bb038" containerName="registry-server" containerID="cri-o://636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a" gracePeriod=2 Nov 25 10:19:36 crc kubenswrapper[4932]: I1125 10:19:36.385495 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 25 10:19:36 crc kubenswrapper[4932]: I1125 10:19:36.855126 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:36 crc kubenswrapper[4932]: I1125 10:19:36.863080 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76ee4ddc-6cac-4103-b87f-b951619bb038-catalog-content\") pod \"76ee4ddc-6cac-4103-b87f-b951619bb038\" (UID: \"76ee4ddc-6cac-4103-b87f-b951619bb038\") " Nov 25 10:19:36 crc kubenswrapper[4932]: I1125 10:19:36.863149 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rb5nt\" (UniqueName: \"kubernetes.io/projected/76ee4ddc-6cac-4103-b87f-b951619bb038-kube-api-access-rb5nt\") pod \"76ee4ddc-6cac-4103-b87f-b951619bb038\" (UID: \"76ee4ddc-6cac-4103-b87f-b951619bb038\") " Nov 25 10:19:36 crc kubenswrapper[4932]: I1125 10:19:36.863166 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76ee4ddc-6cac-4103-b87f-b951619bb038-utilities\") pod \"76ee4ddc-6cac-4103-b87f-b951619bb038\" (UID: \"76ee4ddc-6cac-4103-b87f-b951619bb038\") " Nov 25 10:19:36 crc kubenswrapper[4932]: I1125 10:19:36.864145 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76ee4ddc-6cac-4103-b87f-b951619bb038-utilities" (OuterVolumeSpecName: "utilities") pod "76ee4ddc-6cac-4103-b87f-b951619bb038" (UID: "76ee4ddc-6cac-4103-b87f-b951619bb038"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:19:36 crc kubenswrapper[4932]: I1125 10:19:36.867586 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76ee4ddc-6cac-4103-b87f-b951619bb038-kube-api-access-rb5nt" (OuterVolumeSpecName: "kube-api-access-rb5nt") pod "76ee4ddc-6cac-4103-b87f-b951619bb038" (UID: "76ee4ddc-6cac-4103-b87f-b951619bb038"). InnerVolumeSpecName "kube-api-access-rb5nt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:19:36 crc kubenswrapper[4932]: I1125 10:19:36.964746 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rb5nt\" (UniqueName: \"kubernetes.io/projected/76ee4ddc-6cac-4103-b87f-b951619bb038-kube-api-access-rb5nt\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:36 crc kubenswrapper[4932]: I1125 10:19:36.964774 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76ee4ddc-6cac-4103-b87f-b951619bb038-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.395642 4932 generic.go:334] "Generic (PLEG): container finished" podID="76ee4ddc-6cac-4103-b87f-b951619bb038" containerID="636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a" exitCode=0 Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.395691 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qx4rr" event={"ID":"76ee4ddc-6cac-4103-b87f-b951619bb038","Type":"ContainerDied","Data":"636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a"} Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.397023 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qx4rr" event={"ID":"76ee4ddc-6cac-4103-b87f-b951619bb038","Type":"ContainerDied","Data":"3b7aa98fd0ebae91ae9ceb38c1b12e4019adb3afe1f9c756e48184b051c18aa9"} Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.397051 4932 scope.go:117] "RemoveContainer" containerID="636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a" Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.395715 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qx4rr" Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.419441 4932 scope.go:117] "RemoveContainer" containerID="a193db59e009595e31997a0599610cb10d84d247bed841bbd36a566b447efa70" Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.441401 4932 scope.go:117] "RemoveContainer" containerID="918ba3177aa023c28e6ea19d8904f7f3566dc60aa81a0eb774b7b8d81f01e0cd" Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.487635 4932 scope.go:117] "RemoveContainer" containerID="636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a" Nov 25 10:19:37 crc kubenswrapper[4932]: E1125 10:19:37.488478 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a\": container with ID starting with 636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a not found: ID does not exist" containerID="636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a" Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.488517 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a"} err="failed to get container status \"636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a\": rpc error: code = NotFound desc = could not find container \"636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a\": container with ID starting with 636868d4444268d679896dc12f894d2707cf02e6dee6a9546dcc69c1d20eee3a not found: ID does not exist" Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.488544 4932 scope.go:117] "RemoveContainer" containerID="a193db59e009595e31997a0599610cb10d84d247bed841bbd36a566b447efa70" Nov 25 10:19:37 crc kubenswrapper[4932]: E1125 10:19:37.488902 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a193db59e009595e31997a0599610cb10d84d247bed841bbd36a566b447efa70\": container with ID starting with a193db59e009595e31997a0599610cb10d84d247bed841bbd36a566b447efa70 not found: ID does not exist" containerID="a193db59e009595e31997a0599610cb10d84d247bed841bbd36a566b447efa70" Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.488926 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a193db59e009595e31997a0599610cb10d84d247bed841bbd36a566b447efa70"} err="failed to get container status \"a193db59e009595e31997a0599610cb10d84d247bed841bbd36a566b447efa70\": rpc error: code = NotFound desc = could not find container \"a193db59e009595e31997a0599610cb10d84d247bed841bbd36a566b447efa70\": container with ID starting with a193db59e009595e31997a0599610cb10d84d247bed841bbd36a566b447efa70 not found: ID does not exist" Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.488942 4932 scope.go:117] "RemoveContainer" containerID="918ba3177aa023c28e6ea19d8904f7f3566dc60aa81a0eb774b7b8d81f01e0cd" Nov 25 10:19:37 crc kubenswrapper[4932]: E1125 10:19:37.489364 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"918ba3177aa023c28e6ea19d8904f7f3566dc60aa81a0eb774b7b8d81f01e0cd\": container with ID starting with 918ba3177aa023c28e6ea19d8904f7f3566dc60aa81a0eb774b7b8d81f01e0cd not found: ID does not exist" containerID="918ba3177aa023c28e6ea19d8904f7f3566dc60aa81a0eb774b7b8d81f01e0cd" 
Nov 25 10:19:37 crc kubenswrapper[4932]: I1125 10:19:37.489393 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"918ba3177aa023c28e6ea19d8904f7f3566dc60aa81a0eb774b7b8d81f01e0cd"} err="failed to get container status \"918ba3177aa023c28e6ea19d8904f7f3566dc60aa81a0eb774b7b8d81f01e0cd\": rpc error: code = NotFound desc = could not find container \"918ba3177aa023c28e6ea19d8904f7f3566dc60aa81a0eb774b7b8d81f01e0cd\": container with ID starting with 918ba3177aa023c28e6ea19d8904f7f3566dc60aa81a0eb774b7b8d81f01e0cd not found: ID does not exist" Nov 25 10:19:38 crc kubenswrapper[4932]: I1125 10:19:38.311442 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76ee4ddc-6cac-4103-b87f-b951619bb038-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "76ee4ddc-6cac-4103-b87f-b951619bb038" (UID: "76ee4ddc-6cac-4103-b87f-b951619bb038"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:19:38 crc kubenswrapper[4932]: I1125 10:19:38.388479 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76ee4ddc-6cac-4103-b87f-b951619bb038-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:38 crc kubenswrapper[4932]: I1125 10:19:38.646698 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qx4rr"] Nov 25 10:19:38 crc kubenswrapper[4932]: I1125 10:19:38.653900 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qx4rr"] Nov 25 10:19:38 crc kubenswrapper[4932]: I1125 10:19:38.975861 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-n7pqr"] Nov 25 10:19:38 crc kubenswrapper[4932]: E1125 10:19:38.976212 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76ee4ddc-6cac-4103-b87f-b951619bb038" containerName="extract-content" Nov 25 10:19:38 crc kubenswrapper[4932]: I1125 10:19:38.976232 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="76ee4ddc-6cac-4103-b87f-b951619bb038" containerName="extract-content" Nov 25 10:19:38 crc kubenswrapper[4932]: E1125 10:19:38.976253 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76ee4ddc-6cac-4103-b87f-b951619bb038" containerName="extract-utilities" Nov 25 10:19:38 crc kubenswrapper[4932]: I1125 10:19:38.976259 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="76ee4ddc-6cac-4103-b87f-b951619bb038" containerName="extract-utilities" Nov 25 10:19:38 crc kubenswrapper[4932]: E1125 10:19:38.976286 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76ee4ddc-6cac-4103-b87f-b951619bb038" containerName="registry-server" Nov 25 10:19:38 crc kubenswrapper[4932]: I1125 10:19:38.976297 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="76ee4ddc-6cac-4103-b87f-b951619bb038" containerName="registry-server" Nov 25 10:19:38 crc kubenswrapper[4932]: I1125 10:19:38.976491 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="76ee4ddc-6cac-4103-b87f-b951619bb038" containerName="registry-server" Nov 25 10:19:38 crc kubenswrapper[4932]: I1125 10:19:38.977122 4932 util.go:30] "No sandbox for pod can be found. 
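The "ContainerStatus from runtime service failed" / "DeleteContainer returned error" pairs above are benign: by the time the kubelet retries removal, cri-o has already deleted the container, so the runtime returns gRPC NotFound. A minimal Go sketch of that idempotent-deletion pattern, assuming a hypothetical runtimeService stand-in rather than the real CRI client:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// runtimeService is a hypothetical stand-in for a CRI runtime client.
type runtimeService interface {
	RemoveContainer(ctx context.Context, id string) error
}

// goneRuntime simulates a runtime where the container is already gone.
type goneRuntime struct{}

func (goneRuntime) RemoveContainer(_ context.Context, id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

// removeIfPresent treats gRPC NotFound as success so that container
// deletion is idempotent, mirroring the DeleteContainer/NotFound
// pattern in the log entries above.
func removeIfPresent(ctx context.Context, rs runtimeService, id string) error {
	if err := rs.RemoveContainer(ctx, id); err != nil && status.Code(err) != codes.NotFound {
		return err
	}
	return nil
}

func main() {
	err := removeIfPresent(context.Background(), goneRuntime{}, "636868d44442")
	fmt.Println("removal error:", err) // prints: removal error: <nil>
}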
Need to start a new one" pod="openstack/keystone-db-create-n7pqr" Nov 25 10:19:38 crc kubenswrapper[4932]: I1125 10:19:38.986904 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-n7pqr"] Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.079137 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7abc-account-create-f6jkl"] Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.080365 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7abc-account-create-f6jkl" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.082657 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.091683 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7abc-account-create-f6jkl"] Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.106386 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5bc26237-8e21-4e9b-9f77-f3b234013bde-operator-scripts\") pod \"keystone-db-create-n7pqr\" (UID: \"5bc26237-8e21-4e9b-9f77-f3b234013bde\") " pod="openstack/keystone-db-create-n7pqr" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.106573 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lnmz\" (UniqueName: \"kubernetes.io/projected/5bc26237-8e21-4e9b-9f77-f3b234013bde-kube-api-access-8lnmz\") pod \"keystone-db-create-n7pqr\" (UID: \"5bc26237-8e21-4e9b-9f77-f3b234013bde\") " pod="openstack/keystone-db-create-n7pqr" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.106618 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9350488-929d-4db8-8a5e-5d17da1952b8-operator-scripts\") pod \"keystone-7abc-account-create-f6jkl\" (UID: \"c9350488-929d-4db8-8a5e-5d17da1952b8\") " pod="openstack/keystone-7abc-account-create-f6jkl" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.106638 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrb2j\" (UniqueName: \"kubernetes.io/projected/c9350488-929d-4db8-8a5e-5d17da1952b8-kube-api-access-jrb2j\") pod \"keystone-7abc-account-create-f6jkl\" (UID: \"c9350488-929d-4db8-8a5e-5d17da1952b8\") " pod="openstack/keystone-7abc-account-create-f6jkl" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.207973 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5bc26237-8e21-4e9b-9f77-f3b234013bde-operator-scripts\") pod \"keystone-db-create-n7pqr\" (UID: \"5bc26237-8e21-4e9b-9f77-f3b234013bde\") " pod="openstack/keystone-db-create-n7pqr" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.208375 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lnmz\" (UniqueName: \"kubernetes.io/projected/5bc26237-8e21-4e9b-9f77-f3b234013bde-kube-api-access-8lnmz\") pod \"keystone-db-create-n7pqr\" (UID: \"5bc26237-8e21-4e9b-9f77-f3b234013bde\") " pod="openstack/keystone-db-create-n7pqr" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.208410 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/c9350488-929d-4db8-8a5e-5d17da1952b8-operator-scripts\") pod \"keystone-7abc-account-create-f6jkl\" (UID: \"c9350488-929d-4db8-8a5e-5d17da1952b8\") " pod="openstack/keystone-7abc-account-create-f6jkl" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.208437 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrb2j\" (UniqueName: \"kubernetes.io/projected/c9350488-929d-4db8-8a5e-5d17da1952b8-kube-api-access-jrb2j\") pod \"keystone-7abc-account-create-f6jkl\" (UID: \"c9350488-929d-4db8-8a5e-5d17da1952b8\") " pod="openstack/keystone-7abc-account-create-f6jkl" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.209509 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9350488-929d-4db8-8a5e-5d17da1952b8-operator-scripts\") pod \"keystone-7abc-account-create-f6jkl\" (UID: \"c9350488-929d-4db8-8a5e-5d17da1952b8\") " pod="openstack/keystone-7abc-account-create-f6jkl" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.210783 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5bc26237-8e21-4e9b-9f77-f3b234013bde-operator-scripts\") pod \"keystone-db-create-n7pqr\" (UID: \"5bc26237-8e21-4e9b-9f77-f3b234013bde\") " pod="openstack/keystone-db-create-n7pqr" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.232926 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrb2j\" (UniqueName: \"kubernetes.io/projected/c9350488-929d-4db8-8a5e-5d17da1952b8-kube-api-access-jrb2j\") pod \"keystone-7abc-account-create-f6jkl\" (UID: \"c9350488-929d-4db8-8a5e-5d17da1952b8\") " pod="openstack/keystone-7abc-account-create-f6jkl" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.233013 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lnmz\" (UniqueName: \"kubernetes.io/projected/5bc26237-8e21-4e9b-9f77-f3b234013bde-kube-api-access-8lnmz\") pod \"keystone-db-create-n7pqr\" (UID: \"5bc26237-8e21-4e9b-9f77-f3b234013bde\") " pod="openstack/keystone-db-create-n7pqr" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.303411 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-n7pqr" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.398046 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7abc-account-create-f6jkl" Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.797749 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-n7pqr"] Nov 25 10:19:39 crc kubenswrapper[4932]: W1125 10:19:39.802452 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5bc26237_8e21_4e9b_9f77_f3b234013bde.slice/crio-45126b1a9b437d1e8f04ff9d0f48f3ad8bdc30f723d37df9aa12ef88b7260020 WatchSource:0}: Error finding container 45126b1a9b437d1e8f04ff9d0f48f3ad8bdc30f723d37df9aa12ef88b7260020: Status 404 returned error can't find the container with id 45126b1a9b437d1e8f04ff9d0f48f3ad8bdc30f723d37df9aa12ef88b7260020 Nov 25 10:19:39 crc kubenswrapper[4932]: I1125 10:19:39.877099 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7abc-account-create-f6jkl"] Nov 25 10:19:39 crc kubenswrapper[4932]: W1125 10:19:39.894779 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9350488_929d_4db8_8a5e_5d17da1952b8.slice/crio-4aab690d93733cd905f65efcc2f1b076b07fd0fa3a7a0a08932e00f2a177b65a WatchSource:0}: Error finding container 4aab690d93733cd905f65efcc2f1b076b07fd0fa3a7a0a08932e00f2a177b65a: Status 404 returned error can't find the container with id 4aab690d93733cd905f65efcc2f1b076b07fd0fa3a7a0a08932e00f2a177b65a Nov 25 10:19:40 crc kubenswrapper[4932]: I1125 10:19:40.432333 4932 generic.go:334] "Generic (PLEG): container finished" podID="c9350488-929d-4db8-8a5e-5d17da1952b8" containerID="d6660d5283c98153e44d37a76316ede92a598cf08fb655b0ece3802a81efb530" exitCode=0 Nov 25 10:19:40 crc kubenswrapper[4932]: I1125 10:19:40.432397 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7abc-account-create-f6jkl" event={"ID":"c9350488-929d-4db8-8a5e-5d17da1952b8","Type":"ContainerDied","Data":"d6660d5283c98153e44d37a76316ede92a598cf08fb655b0ece3802a81efb530"} Nov 25 10:19:40 crc kubenswrapper[4932]: I1125 10:19:40.432757 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7abc-account-create-f6jkl" event={"ID":"c9350488-929d-4db8-8a5e-5d17da1952b8","Type":"ContainerStarted","Data":"4aab690d93733cd905f65efcc2f1b076b07fd0fa3a7a0a08932e00f2a177b65a"} Nov 25 10:19:40 crc kubenswrapper[4932]: I1125 10:19:40.434611 4932 generic.go:334] "Generic (PLEG): container finished" podID="5bc26237-8e21-4e9b-9f77-f3b234013bde" containerID="623eaf68f11fb2a6869720b19ed2e51efe0e8e7ca6d07661393cef4a60309b00" exitCode=0 Nov 25 10:19:40 crc kubenswrapper[4932]: I1125 10:19:40.434671 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-n7pqr" event={"ID":"5bc26237-8e21-4e9b-9f77-f3b234013bde","Type":"ContainerDied","Data":"623eaf68f11fb2a6869720b19ed2e51efe0e8e7ca6d07661393cef4a60309b00"} Nov 25 10:19:40 crc kubenswrapper[4932]: I1125 10:19:40.434697 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-n7pqr" event={"ID":"5bc26237-8e21-4e9b-9f77-f3b234013bde","Type":"ContainerStarted","Data":"45126b1a9b437d1e8f04ff9d0f48f3ad8bdc30f723d37df9aa12ef88b7260020"} Nov 25 10:19:40 crc kubenswrapper[4932]: I1125 10:19:40.635445 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76ee4ddc-6cac-4103-b87f-b951619bb038" path="/var/lib/kubelet/pods/76ee4ddc-6cac-4103-b87f-b951619bb038/volumes" Nov 25 10:19:41 crc kubenswrapper[4932]: I1125 
Nov 25 10:19:41 crc kubenswrapper[4932]: I1125 10:19:41.908085 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7abc-account-create-f6jkl"
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.059135 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lnmz\" (UniqueName: \"kubernetes.io/projected/5bc26237-8e21-4e9b-9f77-f3b234013bde-kube-api-access-8lnmz\") pod \"5bc26237-8e21-4e9b-9f77-f3b234013bde\" (UID: \"5bc26237-8e21-4e9b-9f77-f3b234013bde\") "
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.059241 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5bc26237-8e21-4e9b-9f77-f3b234013bde-operator-scripts\") pod \"5bc26237-8e21-4e9b-9f77-f3b234013bde\" (UID: \"5bc26237-8e21-4e9b-9f77-f3b234013bde\") "
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.059472 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9350488-929d-4db8-8a5e-5d17da1952b8-operator-scripts\") pod \"c9350488-929d-4db8-8a5e-5d17da1952b8\" (UID: \"c9350488-929d-4db8-8a5e-5d17da1952b8\") "
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.059705 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrb2j\" (UniqueName: \"kubernetes.io/projected/c9350488-929d-4db8-8a5e-5d17da1952b8-kube-api-access-jrb2j\") pod \"c9350488-929d-4db8-8a5e-5d17da1952b8\" (UID: \"c9350488-929d-4db8-8a5e-5d17da1952b8\") "
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.060135 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9350488-929d-4db8-8a5e-5d17da1952b8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c9350488-929d-4db8-8a5e-5d17da1952b8" (UID: "c9350488-929d-4db8-8a5e-5d17da1952b8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.060149 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5bc26237-8e21-4e9b-9f77-f3b234013bde-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5bc26237-8e21-4e9b-9f77-f3b234013bde" (UID: "5bc26237-8e21-4e9b-9f77-f3b234013bde"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.060576 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5bc26237-8e21-4e9b-9f77-f3b234013bde-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.060624 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c9350488-929d-4db8-8a5e-5d17da1952b8-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.064582 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9350488-929d-4db8-8a5e-5d17da1952b8-kube-api-access-jrb2j" (OuterVolumeSpecName: "kube-api-access-jrb2j") pod "c9350488-929d-4db8-8a5e-5d17da1952b8" (UID: "c9350488-929d-4db8-8a5e-5d17da1952b8"). InnerVolumeSpecName "kube-api-access-jrb2j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.065381 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bc26237-8e21-4e9b-9f77-f3b234013bde-kube-api-access-8lnmz" (OuterVolumeSpecName: "kube-api-access-8lnmz") pod "5bc26237-8e21-4e9b-9f77-f3b234013bde" (UID: "5bc26237-8e21-4e9b-9f77-f3b234013bde"). InnerVolumeSpecName "kube-api-access-8lnmz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.162445 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrb2j\" (UniqueName: \"kubernetes.io/projected/c9350488-929d-4db8-8a5e-5d17da1952b8-kube-api-access-jrb2j\") on node \"crc\" DevicePath \"\""
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.162472 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lnmz\" (UniqueName: \"kubernetes.io/projected/5bc26237-8e21-4e9b-9f77-f3b234013bde-kube-api-access-8lnmz\") on node \"crc\" DevicePath \"\""
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.455308 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7abc-account-create-f6jkl" event={"ID":"c9350488-929d-4db8-8a5e-5d17da1952b8","Type":"ContainerDied","Data":"4aab690d93733cd905f65efcc2f1b076b07fd0fa3a7a0a08932e00f2a177b65a"}
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.455335 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7abc-account-create-f6jkl"
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.455358 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4aab690d93733cd905f65efcc2f1b076b07fd0fa3a7a0a08932e00f2a177b65a"
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.457071 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-n7pqr" event={"ID":"5bc26237-8e21-4e9b-9f77-f3b234013bde","Type":"ContainerDied","Data":"45126b1a9b437d1e8f04ff9d0f48f3ad8bdc30f723d37df9aa12ef88b7260020"}
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.457100 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45126b1a9b437d1e8f04ff9d0f48f3ad8bdc30f723d37df9aa12ef88b7260020"
Nov 25 10:19:42 crc kubenswrapper[4932]: I1125 10:19:42.457169 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-n7pqr"
Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.210377 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0"
Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.640737 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-pg22p"]
Nov 25 10:19:44 crc kubenswrapper[4932]: E1125 10:19:44.641484 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9350488-929d-4db8-8a5e-5d17da1952b8" containerName="mariadb-account-create"
Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.641521 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9350488-929d-4db8-8a5e-5d17da1952b8" containerName="mariadb-account-create"
Nov 25 10:19:44 crc kubenswrapper[4932]: E1125 10:19:44.641557 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bc26237-8e21-4e9b-9f77-f3b234013bde" containerName="mariadb-database-create"
Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.641571 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bc26237-8e21-4e9b-9f77-f3b234013bde" containerName="mariadb-database-create"
Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.641853 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9350488-929d-4db8-8a5e-5d17da1952b8" containerName="mariadb-account-create"
Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.641889 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bc26237-8e21-4e9b-9f77-f3b234013bde" containerName="mariadb-database-create"
Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.642779 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-pg22p"]
Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.643011 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pg22p"
Need to start a new one" pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.645964 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.646156 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.646252 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.646496 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-zs62z" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.718402 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ef2effb-8965-49db-8aa8-49fd55e6a149-combined-ca-bundle\") pod \"keystone-db-sync-pg22p\" (UID: \"7ef2effb-8965-49db-8aa8-49fd55e6a149\") " pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.718534 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ef2effb-8965-49db-8aa8-49fd55e6a149-config-data\") pod \"keystone-db-sync-pg22p\" (UID: \"7ef2effb-8965-49db-8aa8-49fd55e6a149\") " pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.718594 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nw749\" (UniqueName: \"kubernetes.io/projected/7ef2effb-8965-49db-8aa8-49fd55e6a149-kube-api-access-nw749\") pod \"keystone-db-sync-pg22p\" (UID: \"7ef2effb-8965-49db-8aa8-49fd55e6a149\") " pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.820599 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ef2effb-8965-49db-8aa8-49fd55e6a149-combined-ca-bundle\") pod \"keystone-db-sync-pg22p\" (UID: \"7ef2effb-8965-49db-8aa8-49fd55e6a149\") " pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.820684 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ef2effb-8965-49db-8aa8-49fd55e6a149-config-data\") pod \"keystone-db-sync-pg22p\" (UID: \"7ef2effb-8965-49db-8aa8-49fd55e6a149\") " pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.820724 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nw749\" (UniqueName: \"kubernetes.io/projected/7ef2effb-8965-49db-8aa8-49fd55e6a149-kube-api-access-nw749\") pod \"keystone-db-sync-pg22p\" (UID: \"7ef2effb-8965-49db-8aa8-49fd55e6a149\") " pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.825704 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ef2effb-8965-49db-8aa8-49fd55e6a149-combined-ca-bundle\") pod \"keystone-db-sync-pg22p\" (UID: \"7ef2effb-8965-49db-8aa8-49fd55e6a149\") " pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.825735 4932 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ef2effb-8965-49db-8aa8-49fd55e6a149-config-data\") pod \"keystone-db-sync-pg22p\" (UID: \"7ef2effb-8965-49db-8aa8-49fd55e6a149\") " pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.839042 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nw749\" (UniqueName: \"kubernetes.io/projected/7ef2effb-8965-49db-8aa8-49fd55e6a149-kube-api-access-nw749\") pod \"keystone-db-sync-pg22p\" (UID: \"7ef2effb-8965-49db-8aa8-49fd55e6a149\") " pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:44 crc kubenswrapper[4932]: I1125 10:19:44.962634 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:45 crc kubenswrapper[4932]: I1125 10:19:45.380579 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-pg22p"] Nov 25 10:19:45 crc kubenswrapper[4932]: I1125 10:19:45.480035 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pg22p" event={"ID":"7ef2effb-8965-49db-8aa8-49fd55e6a149","Type":"ContainerStarted","Data":"f429786fb25b60994aef9fd81d5fa609bdc3882e5d1e8cb6a8abb73e7c4aee4d"} Nov 25 10:19:46 crc kubenswrapper[4932]: I1125 10:19:46.490063 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pg22p" event={"ID":"7ef2effb-8965-49db-8aa8-49fd55e6a149","Type":"ContainerStarted","Data":"0db22cda3dbbcc647d0df03bb1266e5a0e8631f338f508e9793662b80f0265bd"} Nov 25 10:19:46 crc kubenswrapper[4932]: I1125 10:19:46.516138 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-pg22p" podStartSLOduration=2.516115268 podStartE2EDuration="2.516115268s" podCreationTimestamp="2025-11-25 10:19:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:19:46.509619882 +0000 UTC m=+5446.635649465" watchObservedRunningTime="2025-11-25 10:19:46.516115268 +0000 UTC m=+5446.642144851" Nov 25 10:19:47 crc kubenswrapper[4932]: I1125 10:19:47.503466 4932 generic.go:334] "Generic (PLEG): container finished" podID="7ef2effb-8965-49db-8aa8-49fd55e6a149" containerID="0db22cda3dbbcc647d0df03bb1266e5a0e8631f338f508e9793662b80f0265bd" exitCode=0 Nov 25 10:19:47 crc kubenswrapper[4932]: I1125 10:19:47.503533 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pg22p" event={"ID":"7ef2effb-8965-49db-8aa8-49fd55e6a149","Type":"ContainerDied","Data":"0db22cda3dbbcc647d0df03bb1266e5a0e8631f338f508e9793662b80f0265bd"} Nov 25 10:19:48 crc kubenswrapper[4932]: I1125 10:19:48.838975 4932 util.go:48] "No ready sandbox for pod can be found. 
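The "Observed pod startup duration" entry above can be checked by hand: with both image-pulling timestamps at the zero value, podStartSLOduration is simply watchObservedRunningTime minus podCreationTimestamp. A small Go check using the values copied from that log entry:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Layout matching the timestamp format in the log entry above.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

	created, err := time.Parse(layout, "2025-11-25 10:19:44 +0000 UTC")
	if err != nil {
		panic(err)
	}
	observed, err := time.Parse(layout, "2025-11-25 10:19:46.516115268 +0000 UTC")
	if err != nil {
		panic(err)
	}

	// Prints 2.516115268s, matching podStartSLOduration in the log.
	fmt.Println(observed.Sub(created))
}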
Need to start a new one" pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:48 crc kubenswrapper[4932]: I1125 10:19:48.902117 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ef2effb-8965-49db-8aa8-49fd55e6a149-combined-ca-bundle\") pod \"7ef2effb-8965-49db-8aa8-49fd55e6a149\" (UID: \"7ef2effb-8965-49db-8aa8-49fd55e6a149\") " Nov 25 10:19:48 crc kubenswrapper[4932]: I1125 10:19:48.902186 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nw749\" (UniqueName: \"kubernetes.io/projected/7ef2effb-8965-49db-8aa8-49fd55e6a149-kube-api-access-nw749\") pod \"7ef2effb-8965-49db-8aa8-49fd55e6a149\" (UID: \"7ef2effb-8965-49db-8aa8-49fd55e6a149\") " Nov 25 10:19:48 crc kubenswrapper[4932]: I1125 10:19:48.902252 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ef2effb-8965-49db-8aa8-49fd55e6a149-config-data\") pod \"7ef2effb-8965-49db-8aa8-49fd55e6a149\" (UID: \"7ef2effb-8965-49db-8aa8-49fd55e6a149\") " Nov 25 10:19:48 crc kubenswrapper[4932]: I1125 10:19:48.907798 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ef2effb-8965-49db-8aa8-49fd55e6a149-kube-api-access-nw749" (OuterVolumeSpecName: "kube-api-access-nw749") pod "7ef2effb-8965-49db-8aa8-49fd55e6a149" (UID: "7ef2effb-8965-49db-8aa8-49fd55e6a149"). InnerVolumeSpecName "kube-api-access-nw749". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:19:48 crc kubenswrapper[4932]: I1125 10:19:48.929637 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ef2effb-8965-49db-8aa8-49fd55e6a149-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ef2effb-8965-49db-8aa8-49fd55e6a149" (UID: "7ef2effb-8965-49db-8aa8-49fd55e6a149"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:19:48 crc kubenswrapper[4932]: I1125 10:19:48.959078 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ef2effb-8965-49db-8aa8-49fd55e6a149-config-data" (OuterVolumeSpecName: "config-data") pod "7ef2effb-8965-49db-8aa8-49fd55e6a149" (UID: "7ef2effb-8965-49db-8aa8-49fd55e6a149"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.018104 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nw749\" (UniqueName: \"kubernetes.io/projected/7ef2effb-8965-49db-8aa8-49fd55e6a149-kube-api-access-nw749\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.018139 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ef2effb-8965-49db-8aa8-49fd55e6a149-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.018157 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ef2effb-8965-49db-8aa8-49fd55e6a149-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.529993 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-pg22p" event={"ID":"7ef2effb-8965-49db-8aa8-49fd55e6a149","Type":"ContainerDied","Data":"f429786fb25b60994aef9fd81d5fa609bdc3882e5d1e8cb6a8abb73e7c4aee4d"} Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.530366 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f429786fb25b60994aef9fd81d5fa609bdc3882e5d1e8cb6a8abb73e7c4aee4d" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.530447 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-pg22p" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.787568 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78f594b499-vhzbh"] Nov 25 10:19:49 crc kubenswrapper[4932]: E1125 10:19:49.788067 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ef2effb-8965-49db-8aa8-49fd55e6a149" containerName="keystone-db-sync" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.788085 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ef2effb-8965-49db-8aa8-49fd55e6a149" containerName="keystone-db-sync" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.788329 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ef2effb-8965-49db-8aa8-49fd55e6a149" containerName="keystone-db-sync" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.789593 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.802882 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78f594b499-vhzbh"] Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.812001 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-hq4bz"] Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.816505 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.824504 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-zs62z" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.824694 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.824833 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.824870 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.833037 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-dns-svc\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.833095 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9l5nq\" (UniqueName: \"kubernetes.io/projected/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-kube-api-access-9l5nq\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.833145 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-config\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.833227 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-ovsdbserver-sb\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.833282 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-ovsdbserver-nb\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.833641 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.852177 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hq4bz"] Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.934859 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-ovsdbserver-sb\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.934918 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-config-data\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.934948 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-fernet-keys\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.934972 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-combined-ca-bundle\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.935026 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-ovsdbserver-nb\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.935057 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thf2x\" (UniqueName: \"kubernetes.io/projected/099f9018-5f4f-4b1a-9359-877ab3d8aec8-kube-api-access-thf2x\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.935132 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-dns-svc\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.935161 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9l5nq\" (UniqueName: \"kubernetes.io/projected/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-kube-api-access-9l5nq\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.935234 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-config\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.935281 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-credential-keys\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.935312 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-scripts\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.936356 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-ovsdbserver-nb\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.936440 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-ovsdbserver-sb\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.936998 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-dns-svc\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.937178 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-config\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:49 crc kubenswrapper[4932]: I1125 10:19:49.962073 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9l5nq\" (UniqueName: \"kubernetes.io/projected/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-kube-api-access-9l5nq\") pod \"dnsmasq-dns-78f594b499-vhzbh\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.037142 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-scripts\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.037756 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-config-data\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.037853 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-fernet-keys\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.037945 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-combined-ca-bundle\") pod 
\"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.038078 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thf2x\" (UniqueName: \"kubernetes.io/projected/099f9018-5f4f-4b1a-9359-877ab3d8aec8-kube-api-access-thf2x\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.038264 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-credential-keys\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.041417 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-credential-keys\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.041974 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-combined-ca-bundle\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.042539 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-fernet-keys\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.043310 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-config-data\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.044682 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-scripts\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.055013 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thf2x\" (UniqueName: \"kubernetes.io/projected/099f9018-5f4f-4b1a-9359-877ab3d8aec8-kube-api-access-thf2x\") pod \"keystone-bootstrap-hq4bz\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.135371 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.157880 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.594734 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78f594b499-vhzbh"] Nov 25 10:19:50 crc kubenswrapper[4932]: W1125 10:19:50.603411 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaeceb7b7_798f_4dab_bc6d_d7cafbd68898.slice/crio-59bcb4df80169430bf1cdb5bf7c27612dcb19478a2efac3b53ccac568cce69fa WatchSource:0}: Error finding container 59bcb4df80169430bf1cdb5bf7c27612dcb19478a2efac3b53ccac568cce69fa: Status 404 returned error can't find the container with id 59bcb4df80169430bf1cdb5bf7c27612dcb19478a2efac3b53ccac568cce69fa Nov 25 10:19:50 crc kubenswrapper[4932]: I1125 10:19:50.666671 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hq4bz"] Nov 25 10:19:51 crc kubenswrapper[4932]: I1125 10:19:51.544206 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hq4bz" event={"ID":"099f9018-5f4f-4b1a-9359-877ab3d8aec8","Type":"ContainerStarted","Data":"84a81bfd652accb4de3a7e0f99c56af42bc5527111407e000cfcfe989bb89adb"} Nov 25 10:19:51 crc kubenswrapper[4932]: I1125 10:19:51.544549 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hq4bz" event={"ID":"099f9018-5f4f-4b1a-9359-877ab3d8aec8","Type":"ContainerStarted","Data":"f2e71cfcc69677cb0d8a59722f4c24f577cd92f4adf9b7c5770f53f92c302f7a"} Nov 25 10:19:51 crc kubenswrapper[4932]: I1125 10:19:51.546854 4932 generic.go:334] "Generic (PLEG): container finished" podID="aeceb7b7-798f-4dab-bc6d-d7cafbd68898" containerID="8e590a0c40d46bf38414a850b854c7922e6abd8640d3c07733f1eb4001552fac" exitCode=0 Nov 25 10:19:51 crc kubenswrapper[4932]: I1125 10:19:51.546884 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" event={"ID":"aeceb7b7-798f-4dab-bc6d-d7cafbd68898","Type":"ContainerDied","Data":"8e590a0c40d46bf38414a850b854c7922e6abd8640d3c07733f1eb4001552fac"} Nov 25 10:19:51 crc kubenswrapper[4932]: I1125 10:19:51.546899 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" event={"ID":"aeceb7b7-798f-4dab-bc6d-d7cafbd68898","Type":"ContainerStarted","Data":"59bcb4df80169430bf1cdb5bf7c27612dcb19478a2efac3b53ccac568cce69fa"} Nov 25 10:19:51 crc kubenswrapper[4932]: I1125 10:19:51.577948 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-hq4bz" podStartSLOduration=2.5779261570000003 podStartE2EDuration="2.577926157s" podCreationTimestamp="2025-11-25 10:19:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:19:51.574422337 +0000 UTC m=+5451.700451900" watchObservedRunningTime="2025-11-25 10:19:51.577926157 +0000 UTC m=+5451.703955720" Nov 25 10:19:52 crc kubenswrapper[4932]: I1125 10:19:52.585683 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" event={"ID":"aeceb7b7-798f-4dab-bc6d-d7cafbd68898","Type":"ContainerStarted","Data":"b47cf42e69db299493fe73a0a4ea483afa18054b9a372a81ab59861aa978bb0f"} Nov 25 10:19:52 crc kubenswrapper[4932]: I1125 10:19:52.586004 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:19:52 crc kubenswrapper[4932]: I1125 
10:19:52.605342 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" podStartSLOduration=3.605324428 podStartE2EDuration="3.605324428s" podCreationTimestamp="2025-11-25 10:19:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:19:52.604446723 +0000 UTC m=+5452.730476286" watchObservedRunningTime="2025-11-25 10:19:52.605324428 +0000 UTC m=+5452.731353991" Nov 25 10:19:54 crc kubenswrapper[4932]: I1125 10:19:54.607741 4932 generic.go:334] "Generic (PLEG): container finished" podID="099f9018-5f4f-4b1a-9359-877ab3d8aec8" containerID="84a81bfd652accb4de3a7e0f99c56af42bc5527111407e000cfcfe989bb89adb" exitCode=0 Nov 25 10:19:54 crc kubenswrapper[4932]: I1125 10:19:54.623788 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hq4bz" event={"ID":"099f9018-5f4f-4b1a-9359-877ab3d8aec8","Type":"ContainerDied","Data":"84a81bfd652accb4de3a7e0f99c56af42bc5527111407e000cfcfe989bb89adb"} Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.011334 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.041253 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thf2x\" (UniqueName: \"kubernetes.io/projected/099f9018-5f4f-4b1a-9359-877ab3d8aec8-kube-api-access-thf2x\") pod \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.041324 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-fernet-keys\") pod \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.041381 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-credential-keys\") pod \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.041438 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-config-data\") pod \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.041497 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-combined-ca-bundle\") pod \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.041587 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-scripts\") pod \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\" (UID: \"099f9018-5f4f-4b1a-9359-877ab3d8aec8\") " Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.058441 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "099f9018-5f4f-4b1a-9359-877ab3d8aec8" (UID: "099f9018-5f4f-4b1a-9359-877ab3d8aec8"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.058460 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "099f9018-5f4f-4b1a-9359-877ab3d8aec8" (UID: "099f9018-5f4f-4b1a-9359-877ab3d8aec8"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.059496 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/099f9018-5f4f-4b1a-9359-877ab3d8aec8-kube-api-access-thf2x" (OuterVolumeSpecName: "kube-api-access-thf2x") pod "099f9018-5f4f-4b1a-9359-877ab3d8aec8" (UID: "099f9018-5f4f-4b1a-9359-877ab3d8aec8"). InnerVolumeSpecName "kube-api-access-thf2x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.060838 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-scripts" (OuterVolumeSpecName: "scripts") pod "099f9018-5f4f-4b1a-9359-877ab3d8aec8" (UID: "099f9018-5f4f-4b1a-9359-877ab3d8aec8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.071408 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-config-data" (OuterVolumeSpecName: "config-data") pod "099f9018-5f4f-4b1a-9359-877ab3d8aec8" (UID: "099f9018-5f4f-4b1a-9359-877ab3d8aec8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.096389 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "099f9018-5f4f-4b1a-9359-877ab3d8aec8" (UID: "099f9018-5f4f-4b1a-9359-877ab3d8aec8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.144405 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.144450 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thf2x\" (UniqueName: \"kubernetes.io/projected/099f9018-5f4f-4b1a-9359-877ab3d8aec8-kube-api-access-thf2x\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.144467 4932 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.144481 4932 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.144493 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.144503 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/099f9018-5f4f-4b1a-9359-877ab3d8aec8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.633232 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hq4bz" event={"ID":"099f9018-5f4f-4b1a-9359-877ab3d8aec8","Type":"ContainerDied","Data":"f2e71cfcc69677cb0d8a59722f4c24f577cd92f4adf9b7c5770f53f92c302f7a"} Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.633272 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2e71cfcc69677cb0d8a59722f4c24f577cd92f4adf9b7c5770f53f92c302f7a" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.633311 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hq4bz" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.726430 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-hq4bz"] Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.733458 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-hq4bz"] Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.818373 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-pgksb"] Nov 25 10:19:56 crc kubenswrapper[4932]: E1125 10:19:56.818780 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="099f9018-5f4f-4b1a-9359-877ab3d8aec8" containerName="keystone-bootstrap" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.818794 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="099f9018-5f4f-4b1a-9359-877ab3d8aec8" containerName="keystone-bootstrap" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.818966 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="099f9018-5f4f-4b1a-9359-877ab3d8aec8" containerName="keystone-bootstrap" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.820673 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.832023 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-pgksb"] Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.836985 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.837033 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.839797 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.839944 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-zs62z" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.840024 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.856022 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-fernet-keys\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.856236 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4d22\" (UniqueName: \"kubernetes.io/projected/0d089d8f-788c-4673-857c-dfd4289325a2-kube-api-access-q4d22\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.856306 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-combined-ca-bundle\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: 
I1125 10:19:56.856452 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-credential-keys\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.856533 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-scripts\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.856573 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-config-data\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.958142 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-credential-keys\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.958444 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-scripts\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.958478 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-config-data\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.958508 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-fernet-keys\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.958543 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4d22\" (UniqueName: \"kubernetes.io/projected/0d089d8f-788c-4673-857c-dfd4289325a2-kube-api-access-q4d22\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.958564 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-combined-ca-bundle\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.963607 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" 
(UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-fernet-keys\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.966765 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-config-data\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.967130 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-combined-ca-bundle\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.973519 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-scripts\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.975284 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-credential-keys\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:56 crc kubenswrapper[4932]: I1125 10:19:56.975941 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4d22\" (UniqueName: \"kubernetes.io/projected/0d089d8f-788c-4673-857c-dfd4289325a2-kube-api-access-q4d22\") pod \"keystone-bootstrap-pgksb\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:57 crc kubenswrapper[4932]: I1125 10:19:57.159796 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:19:57 crc kubenswrapper[4932]: I1125 10:19:57.613866 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-pgksb"] Nov 25 10:19:57 crc kubenswrapper[4932]: I1125 10:19:57.646634 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pgksb" event={"ID":"0d089d8f-788c-4673-857c-dfd4289325a2","Type":"ContainerStarted","Data":"64c4bf2308c1e6b9d677a702416a2cffc3e19680642692861426cf978886f1f9"} Nov 25 10:19:58 crc kubenswrapper[4932]: I1125 10:19:58.619842 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="099f9018-5f4f-4b1a-9359-877ab3d8aec8" path="/var/lib/kubelet/pods/099f9018-5f4f-4b1a-9359-877ab3d8aec8/volumes" Nov 25 10:19:58 crc kubenswrapper[4932]: I1125 10:19:58.655449 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pgksb" event={"ID":"0d089d8f-788c-4673-857c-dfd4289325a2","Type":"ContainerStarted","Data":"5fe6cbde826d535523e9144110263db39acc9943edb9fb665a14a696f13b6318"} Nov 25 10:19:58 crc kubenswrapper[4932]: I1125 10:19:58.690554 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-pgksb" podStartSLOduration=2.690535862 podStartE2EDuration="2.690535862s" podCreationTimestamp="2025-11-25 10:19:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:19:58.685155267 +0000 UTC m=+5458.811184830" watchObservedRunningTime="2025-11-25 10:19:58.690535862 +0000 UTC m=+5458.816565425" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.138232 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.206762 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c98db745c-7wlng"] Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.206987 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" podUID="827e62b6-8653-4c35-8627-c3055eac1032" containerName="dnsmasq-dns" containerID="cri-o://260876b905360bb9f15ab852b3df65659e790b27e81e897e00b3ab107240bc15" gracePeriod=10 Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.679068 4932 generic.go:334] "Generic (PLEG): container finished" podID="827e62b6-8653-4c35-8627-c3055eac1032" containerID="260876b905360bb9f15ab852b3df65659e790b27e81e897e00b3ab107240bc15" exitCode=0 Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.679154 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" event={"ID":"827e62b6-8653-4c35-8627-c3055eac1032","Type":"ContainerDied","Data":"260876b905360bb9f15ab852b3df65659e790b27e81e897e00b3ab107240bc15"} Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.679212 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" event={"ID":"827e62b6-8653-4c35-8627-c3055eac1032","Type":"ContainerDied","Data":"d25fad8687b4ed2401a4cba74cc57b22b84b25a69bb6d5c2fac79dbccb7efb80"} Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.679229 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d25fad8687b4ed2401a4cba74cc57b22b84b25a69bb6d5c2fac79dbccb7efb80" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.680938 4932 generic.go:334] 
"Generic (PLEG): container finished" podID="0d089d8f-788c-4673-857c-dfd4289325a2" containerID="5fe6cbde826d535523e9144110263db39acc9943edb9fb665a14a696f13b6318" exitCode=0 Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.680989 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pgksb" event={"ID":"0d089d8f-788c-4673-857c-dfd4289325a2","Type":"ContainerDied","Data":"5fe6cbde826d535523e9144110263db39acc9943edb9fb665a14a696f13b6318"} Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.733983 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.852229 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-config\") pod \"827e62b6-8653-4c35-8627-c3055eac1032\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.852391 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-ovsdbserver-sb\") pod \"827e62b6-8653-4c35-8627-c3055eac1032\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.852470 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-ovsdbserver-nb\") pod \"827e62b6-8653-4c35-8627-c3055eac1032\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.852532 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vn5rg\" (UniqueName: \"kubernetes.io/projected/827e62b6-8653-4c35-8627-c3055eac1032-kube-api-access-vn5rg\") pod \"827e62b6-8653-4c35-8627-c3055eac1032\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.852560 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-dns-svc\") pod \"827e62b6-8653-4c35-8627-c3055eac1032\" (UID: \"827e62b6-8653-4c35-8627-c3055eac1032\") " Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.858353 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/827e62b6-8653-4c35-8627-c3055eac1032-kube-api-access-vn5rg" (OuterVolumeSpecName: "kube-api-access-vn5rg") pod "827e62b6-8653-4c35-8627-c3055eac1032" (UID: "827e62b6-8653-4c35-8627-c3055eac1032"). InnerVolumeSpecName "kube-api-access-vn5rg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.900814 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "827e62b6-8653-4c35-8627-c3055eac1032" (UID: "827e62b6-8653-4c35-8627-c3055eac1032"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.901969 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "827e62b6-8653-4c35-8627-c3055eac1032" (UID: "827e62b6-8653-4c35-8627-c3055eac1032"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.906104 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "827e62b6-8653-4c35-8627-c3055eac1032" (UID: "827e62b6-8653-4c35-8627-c3055eac1032"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.906299 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-config" (OuterVolumeSpecName: "config") pod "827e62b6-8653-4c35-8627-c3055eac1032" (UID: "827e62b6-8653-4c35-8627-c3055eac1032"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.953828 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.953856 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vn5rg\" (UniqueName: \"kubernetes.io/projected/827e62b6-8653-4c35-8627-c3055eac1032-kube-api-access-vn5rg\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.953868 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.953877 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:00 crc kubenswrapper[4932]: I1125 10:20:00.953885 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/827e62b6-8653-4c35-8627-c3055eac1032-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:01 crc kubenswrapper[4932]: I1125 10:20:01.689256 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c98db745c-7wlng" Nov 25 10:20:01 crc kubenswrapper[4932]: I1125 10:20:01.733970 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c98db745c-7wlng"] Nov 25 10:20:01 crc kubenswrapper[4932]: I1125 10:20:01.741002 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c98db745c-7wlng"] Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.074430 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.176643 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-scripts\") pod \"0d089d8f-788c-4673-857c-dfd4289325a2\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.176686 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4d22\" (UniqueName: \"kubernetes.io/projected/0d089d8f-788c-4673-857c-dfd4289325a2-kube-api-access-q4d22\") pod \"0d089d8f-788c-4673-857c-dfd4289325a2\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.176764 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-config-data\") pod \"0d089d8f-788c-4673-857c-dfd4289325a2\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.176787 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-fernet-keys\") pod \"0d089d8f-788c-4673-857c-dfd4289325a2\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.176877 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-combined-ca-bundle\") pod \"0d089d8f-788c-4673-857c-dfd4289325a2\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.176927 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-credential-keys\") pod \"0d089d8f-788c-4673-857c-dfd4289325a2\" (UID: \"0d089d8f-788c-4673-857c-dfd4289325a2\") " Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.181533 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-scripts" (OuterVolumeSpecName: "scripts") pod "0d089d8f-788c-4673-857c-dfd4289325a2" (UID: "0d089d8f-788c-4673-857c-dfd4289325a2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.181559 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "0d089d8f-788c-4673-857c-dfd4289325a2" (UID: "0d089d8f-788c-4673-857c-dfd4289325a2"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.181609 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "0d089d8f-788c-4673-857c-dfd4289325a2" (UID: "0d089d8f-788c-4673-857c-dfd4289325a2"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.189329 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d089d8f-788c-4673-857c-dfd4289325a2-kube-api-access-q4d22" (OuterVolumeSpecName: "kube-api-access-q4d22") pod "0d089d8f-788c-4673-857c-dfd4289325a2" (UID: "0d089d8f-788c-4673-857c-dfd4289325a2"). InnerVolumeSpecName "kube-api-access-q4d22". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.199327 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-config-data" (OuterVolumeSpecName: "config-data") pod "0d089d8f-788c-4673-857c-dfd4289325a2" (UID: "0d089d8f-788c-4673-857c-dfd4289325a2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.209464 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d089d8f-788c-4673-857c-dfd4289325a2" (UID: "0d089d8f-788c-4673-857c-dfd4289325a2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.279280 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.279309 4932 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.279319 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.279330 4932 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.279340 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4d22\" (UniqueName: \"kubernetes.io/projected/0d089d8f-788c-4673-857c-dfd4289325a2-kube-api-access-q4d22\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.279348 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d089d8f-788c-4673-857c-dfd4289325a2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.625424 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="827e62b6-8653-4c35-8627-c3055eac1032" path="/var/lib/kubelet/pods/827e62b6-8653-4c35-8627-c3055eac1032/volumes" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.701513 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-pgksb" event={"ID":"0d089d8f-788c-4673-857c-dfd4289325a2","Type":"ContainerDied","Data":"64c4bf2308c1e6b9d677a702416a2cffc3e19680642692861426cf978886f1f9"} Nov 25 
10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.701573 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64c4bf2308c1e6b9d677a702416a2cffc3e19680642692861426cf978886f1f9" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.701624 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-pgksb" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.869585 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7bccc74499-hbbp7"] Nov 25 10:20:02 crc kubenswrapper[4932]: E1125 10:20:02.869947 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="827e62b6-8653-4c35-8627-c3055eac1032" containerName="dnsmasq-dns" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.869970 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="827e62b6-8653-4c35-8627-c3055eac1032" containerName="dnsmasq-dns" Nov 25 10:20:02 crc kubenswrapper[4932]: E1125 10:20:02.869993 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d089d8f-788c-4673-857c-dfd4289325a2" containerName="keystone-bootstrap" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.870000 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d089d8f-788c-4673-857c-dfd4289325a2" containerName="keystone-bootstrap" Nov 25 10:20:02 crc kubenswrapper[4932]: E1125 10:20:02.870021 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="827e62b6-8653-4c35-8627-c3055eac1032" containerName="init" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.870026 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="827e62b6-8653-4c35-8627-c3055eac1032" containerName="init" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.870183 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="827e62b6-8653-4c35-8627-c3055eac1032" containerName="dnsmasq-dns" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.870223 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d089d8f-788c-4673-857c-dfd4289325a2" containerName="keystone-bootstrap" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.870788 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.873914 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.874303 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.874474 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.874590 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-zs62z" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.875073 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.875229 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.884529 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7bccc74499-hbbp7"] Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.931626 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-internal-tls-certs\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.931668 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rn4lj\" (UniqueName: \"kubernetes.io/projected/75daa68a-ae08-4bfa-9d39-6001efa58e1f-kube-api-access-rn4lj\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.931707 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-combined-ca-bundle\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.931729 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-public-tls-certs\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.931844 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-fernet-keys\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.931896 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-config-data\") pod \"keystone-7bccc74499-hbbp7\" 
(UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.931938 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-scripts\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:02 crc kubenswrapper[4932]: I1125 10:20:02.931993 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-credential-keys\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.033416 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-internal-tls-certs\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.033494 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rn4lj\" (UniqueName: \"kubernetes.io/projected/75daa68a-ae08-4bfa-9d39-6001efa58e1f-kube-api-access-rn4lj\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.033517 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-combined-ca-bundle\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.033540 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-public-tls-certs\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.033616 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-fernet-keys\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.033664 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-config-data\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.033710 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-scripts\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 
25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.033734 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-credential-keys\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.040102 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-fernet-keys\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.043744 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-scripts\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.044728 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-public-tls-certs\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.045955 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-combined-ca-bundle\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.047405 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-config-data\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.047777 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-credential-keys\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.048783 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/75daa68a-ae08-4bfa-9d39-6001efa58e1f-internal-tls-certs\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.053670 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rn4lj\" (UniqueName: \"kubernetes.io/projected/75daa68a-ae08-4bfa-9d39-6001efa58e1f-kube-api-access-rn4lj\") pod \"keystone-7bccc74499-hbbp7\" (UID: \"75daa68a-ae08-4bfa-9d39-6001efa58e1f\") " pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.234319 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.660881 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7bccc74499-hbbp7"] Nov 25 10:20:03 crc kubenswrapper[4932]: W1125 10:20:03.671841 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod75daa68a_ae08_4bfa_9d39_6001efa58e1f.slice/crio-7b57982cca62b6b5c608b9f552292feff4b58f2a00756d12a62a69c276133a32 WatchSource:0}: Error finding container 7b57982cca62b6b5c608b9f552292feff4b58f2a00756d12a62a69c276133a32: Status 404 returned error can't find the container with id 7b57982cca62b6b5c608b9f552292feff4b58f2a00756d12a62a69c276133a32 Nov 25 10:20:03 crc kubenswrapper[4932]: I1125 10:20:03.712468 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7bccc74499-hbbp7" event={"ID":"75daa68a-ae08-4bfa-9d39-6001efa58e1f","Type":"ContainerStarted","Data":"7b57982cca62b6b5c608b9f552292feff4b58f2a00756d12a62a69c276133a32"} Nov 25 10:20:04 crc kubenswrapper[4932]: I1125 10:20:04.721160 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7bccc74499-hbbp7" event={"ID":"75daa68a-ae08-4bfa-9d39-6001efa58e1f","Type":"ContainerStarted","Data":"8eb45460b8528deecfcec4af123ea4ee43fa976054cadbaf85430810bd51dd6e"} Nov 25 10:20:04 crc kubenswrapper[4932]: I1125 10:20:04.721877 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:04 crc kubenswrapper[4932]: I1125 10:20:04.748891 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7bccc74499-hbbp7" podStartSLOduration=2.748868136 podStartE2EDuration="2.748868136s" podCreationTimestamp="2025-11-25 10:20:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:20:04.744278875 +0000 UTC m=+5464.870308478" watchObservedRunningTime="2025-11-25 10:20:04.748868136 +0000 UTC m=+5464.874897699" Nov 25 10:20:07 crc kubenswrapper[4932]: I1125 10:20:07.181418 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:20:07 crc kubenswrapper[4932]: I1125 10:20:07.181761 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:20:34 crc kubenswrapper[4932]: I1125 10:20:34.825731 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7bccc74499-hbbp7" Nov 25 10:20:37 crc kubenswrapper[4932]: I1125 10:20:37.181503 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:20:37 crc kubenswrapper[4932]: I1125 10:20:37.182252 4932 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:20:38 crc kubenswrapper[4932]: I1125 10:20:38.991431 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 10:20:38 crc kubenswrapper[4932]: I1125 10:20:38.992523 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 10:20:38 crc kubenswrapper[4932]: I1125 10:20:38.997716 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-djrc4" Nov 25 10:20:38 crc kubenswrapper[4932]: I1125 10:20:38.997729 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 10:20:38 crc kubenswrapper[4932]: I1125 10:20:38.997931 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.007180 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.083552 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0dde41a7-5aa6-486f-bedf-414e833c60bf-openstack-config-secret\") pod \"openstackclient\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.083667 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6bpf\" (UniqueName: \"kubernetes.io/projected/0dde41a7-5aa6-486f-bedf-414e833c60bf-kube-api-access-h6bpf\") pod \"openstackclient\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.083787 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0dde41a7-5aa6-486f-bedf-414e833c60bf-openstack-config\") pod \"openstackclient\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.083848 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dde41a7-5aa6-486f-bedf-414e833c60bf-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.185433 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0dde41a7-5aa6-486f-bedf-414e833c60bf-openstack-config\") pod \"openstackclient\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.185541 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dde41a7-5aa6-486f-bedf-414e833c60bf-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " 
pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.186310 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0dde41a7-5aa6-486f-bedf-414e833c60bf-openstack-config\") pod \"openstackclient\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.186688 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0dde41a7-5aa6-486f-bedf-414e833c60bf-openstack-config-secret\") pod \"openstackclient\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.186771 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6bpf\" (UniqueName: \"kubernetes.io/projected/0dde41a7-5aa6-486f-bedf-414e833c60bf-kube-api-access-h6bpf\") pod \"openstackclient\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.192309 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dde41a7-5aa6-486f-bedf-414e833c60bf-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.193372 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0dde41a7-5aa6-486f-bedf-414e833c60bf-openstack-config-secret\") pod \"openstackclient\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.206435 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6bpf\" (UniqueName: \"kubernetes.io/projected/0dde41a7-5aa6-486f-bedf-414e833c60bf-kube-api-access-h6bpf\") pod \"openstackclient\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.320903 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 10:20:39 crc kubenswrapper[4932]: I1125 10:20:39.835032 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 10:20:40 crc kubenswrapper[4932]: I1125 10:20:40.060481 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"0dde41a7-5aa6-486f-bedf-414e833c60bf","Type":"ContainerStarted","Data":"428ffc82cb613e03d43cebe96eaa869b4cf7009e8b445f2824e1c832c47aae7a"} Nov 25 10:20:40 crc kubenswrapper[4932]: I1125 10:20:40.060534 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"0dde41a7-5aa6-486f-bedf-414e833c60bf","Type":"ContainerStarted","Data":"08e906836d1571553e071f2e4191e05460ec4a8e2d02dd8773ed3b6dff58c5a1"} Nov 25 10:20:40 crc kubenswrapper[4932]: I1125 10:20:40.082251 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.082230662 podStartE2EDuration="2.082230662s" podCreationTimestamp="2025-11-25 10:20:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:20:40.078789613 +0000 UTC m=+5500.204819176" watchObservedRunningTime="2025-11-25 10:20:40.082230662 +0000 UTC m=+5500.208260225" Nov 25 10:21:07 crc kubenswrapper[4932]: I1125 10:21:07.180786 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:21:07 crc kubenswrapper[4932]: I1125 10:21:07.181358 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:21:07 crc kubenswrapper[4932]: I1125 10:21:07.181414 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 10:21:07 crc kubenswrapper[4932]: I1125 10:21:07.182140 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5afb8b5224b9f6d2fa11c4acc1cecf78fcbf4f0caae59d8d4bbde36efe0769d9"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:21:07 crc kubenswrapper[4932]: I1125 10:21:07.182274 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://5afb8b5224b9f6d2fa11c4acc1cecf78fcbf4f0caae59d8d4bbde36efe0769d9" gracePeriod=600 Nov 25 10:21:08 crc kubenswrapper[4932]: I1125 10:21:08.297722 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="5afb8b5224b9f6d2fa11c4acc1cecf78fcbf4f0caae59d8d4bbde36efe0769d9" exitCode=0 Nov 25 10:21:08 crc kubenswrapper[4932]: I1125 10:21:08.297984 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"5afb8b5224b9f6d2fa11c4acc1cecf78fcbf4f0caae59d8d4bbde36efe0769d9"} Nov 25 10:21:08 crc kubenswrapper[4932]: I1125 10:21:08.298468 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2"} Nov 25 10:21:08 crc kubenswrapper[4932]: I1125 10:21:08.298496 4932 scope.go:117] "RemoveContainer" containerID="7df80ef62fe82e50cca07a98a2322c1113bcb1b843559755b074b9c9d3f13e2f" Nov 25 10:21:09 crc kubenswrapper[4932]: I1125 10:21:09.246024 4932 scope.go:117] "RemoveContainer" containerID="0918a9b87d80726389b5b445fc8070e80178521c51ad4a58ebdbb78c2cda4e56" Nov 25 10:21:09 crc kubenswrapper[4932]: I1125 10:21:09.265427 4932 scope.go:117] "RemoveContainer" containerID="49501eb9e126afbb391367340e4ba9d6736fbaa927653fde9494c29f8c97bb97" Nov 25 10:21:09 crc kubenswrapper[4932]: I1125 10:21:09.312876 4932 scope.go:117] "RemoveContainer" containerID="a6dbe64ab2367b875415405c350f7c4d61e02b20f1a65bb56f16be92f5e6784d" Nov 25 10:21:09 crc kubenswrapper[4932]: I1125 10:21:09.346751 4932 scope.go:117] "RemoveContainer" containerID="b17f8147b7816273e9be9b9b4713e69ebbda2ca21b60ff13d304d4717bc94ac4" Nov 25 10:21:09 crc kubenswrapper[4932]: I1125 10:21:09.381887 4932 scope.go:117] "RemoveContainer" containerID="73b50a6a63be3c198b322aeabfef26773e29f835dc0c2cc326fe9be4b35513eb" Nov 25 10:21:09 crc kubenswrapper[4932]: I1125 10:21:09.418560 4932 scope.go:117] "RemoveContainer" containerID="56fb9f6caef65305697bf299aac33393b254c31a2939d30daadd6fd1afcfab86" Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.484221 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5grlj"] Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.486980 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.491944 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5grlj"] Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.575919 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zxvl\" (UniqueName: \"kubernetes.io/projected/f8ab5797-80db-40c5-96e7-b04464e39294-kube-api-access-4zxvl\") pod \"redhat-marketplace-5grlj\" (UID: \"f8ab5797-80db-40c5-96e7-b04464e39294\") " pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.576012 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8ab5797-80db-40c5-96e7-b04464e39294-catalog-content\") pod \"redhat-marketplace-5grlj\" (UID: \"f8ab5797-80db-40c5-96e7-b04464e39294\") " pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.576125 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8ab5797-80db-40c5-96e7-b04464e39294-utilities\") pod \"redhat-marketplace-5grlj\" (UID: \"f8ab5797-80db-40c5-96e7-b04464e39294\") " pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.677363 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8ab5797-80db-40c5-96e7-b04464e39294-utilities\") pod \"redhat-marketplace-5grlj\" (UID: \"f8ab5797-80db-40c5-96e7-b04464e39294\") " pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.677474 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zxvl\" (UniqueName: \"kubernetes.io/projected/f8ab5797-80db-40c5-96e7-b04464e39294-kube-api-access-4zxvl\") pod \"redhat-marketplace-5grlj\" (UID: \"f8ab5797-80db-40c5-96e7-b04464e39294\") " pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.677529 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8ab5797-80db-40c5-96e7-b04464e39294-catalog-content\") pod \"redhat-marketplace-5grlj\" (UID: \"f8ab5797-80db-40c5-96e7-b04464e39294\") " pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.678050 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8ab5797-80db-40c5-96e7-b04464e39294-utilities\") pod \"redhat-marketplace-5grlj\" (UID: \"f8ab5797-80db-40c5-96e7-b04464e39294\") " pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.678062 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8ab5797-80db-40c5-96e7-b04464e39294-catalog-content\") pod \"redhat-marketplace-5grlj\" (UID: \"f8ab5797-80db-40c5-96e7-b04464e39294\") " pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.699774 4932 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4zxvl\" (UniqueName: \"kubernetes.io/projected/f8ab5797-80db-40c5-96e7-b04464e39294-kube-api-access-4zxvl\") pod \"redhat-marketplace-5grlj\" (UID: \"f8ab5797-80db-40c5-96e7-b04464e39294\") " pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:01 crc kubenswrapper[4932]: I1125 10:22:01.819214 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:02 crc kubenswrapper[4932]: I1125 10:22:02.340717 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5grlj"] Nov 25 10:22:02 crc kubenswrapper[4932]: I1125 10:22:02.760319 4932 generic.go:334] "Generic (PLEG): container finished" podID="f8ab5797-80db-40c5-96e7-b04464e39294" containerID="8bd27c6914c81168f4782cb19a315139934faac137cfcc248c0f68c6ebf1d30c" exitCode=0 Nov 25 10:22:02 crc kubenswrapper[4932]: I1125 10:22:02.760392 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grlj" event={"ID":"f8ab5797-80db-40c5-96e7-b04464e39294","Type":"ContainerDied","Data":"8bd27c6914c81168f4782cb19a315139934faac137cfcc248c0f68c6ebf1d30c"} Nov 25 10:22:02 crc kubenswrapper[4932]: I1125 10:22:02.760687 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grlj" event={"ID":"f8ab5797-80db-40c5-96e7-b04464e39294","Type":"ContainerStarted","Data":"e1bdea68f579f6b344146dffde23fc531be1176a470a963d28d8fd6958ad467d"} Nov 25 10:22:03 crc kubenswrapper[4932]: I1125 10:22:03.770185 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grlj" event={"ID":"f8ab5797-80db-40c5-96e7-b04464e39294","Type":"ContainerStarted","Data":"2533e36145f9f9cf15754cfc4397a73ea7ec85b63f64dab9590b5b0674f183e7"} Nov 25 10:22:04 crc kubenswrapper[4932]: I1125 10:22:04.783530 4932 generic.go:334] "Generic (PLEG): container finished" podID="f8ab5797-80db-40c5-96e7-b04464e39294" containerID="2533e36145f9f9cf15754cfc4397a73ea7ec85b63f64dab9590b5b0674f183e7" exitCode=0 Nov 25 10:22:04 crc kubenswrapper[4932]: I1125 10:22:04.783588 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grlj" event={"ID":"f8ab5797-80db-40c5-96e7-b04464e39294","Type":"ContainerDied","Data":"2533e36145f9f9cf15754cfc4397a73ea7ec85b63f64dab9590b5b0674f183e7"} Nov 25 10:22:05 crc kubenswrapper[4932]: I1125 10:22:05.795970 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grlj" event={"ID":"f8ab5797-80db-40c5-96e7-b04464e39294","Type":"ContainerStarted","Data":"971f030a2ab3f3af9c5b0c6f251a11332ec7e952101a7c4cb59b1d9fc5fc48e7"} Nov 25 10:22:05 crc kubenswrapper[4932]: I1125 10:22:05.820800 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5grlj" podStartSLOduration=2.418447688 podStartE2EDuration="4.820778828s" podCreationTimestamp="2025-11-25 10:22:01 +0000 UTC" firstStartedPulling="2025-11-25 10:22:02.762556714 +0000 UTC m=+5582.888586277" lastFinishedPulling="2025-11-25 10:22:05.164887854 +0000 UTC m=+5585.290917417" observedRunningTime="2025-11-25 10:22:05.813562531 +0000 UTC m=+5585.939592124" watchObservedRunningTime="2025-11-25 10:22:05.820778828 +0000 UTC m=+5585.946808391" Nov 25 10:22:11 crc kubenswrapper[4932]: I1125 10:22:11.820215 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:11 crc kubenswrapper[4932]: I1125 10:22:11.820765 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:11 crc kubenswrapper[4932]: I1125 10:22:11.869267 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:11 crc kubenswrapper[4932]: I1125 10:22:11.923076 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:12 crc kubenswrapper[4932]: I1125 10:22:12.106993 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5grlj"] Nov 25 10:22:13 crc kubenswrapper[4932]: I1125 10:22:13.859803 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5grlj" podUID="f8ab5797-80db-40c5-96e7-b04464e39294" containerName="registry-server" containerID="cri-o://971f030a2ab3f3af9c5b0c6f251a11332ec7e952101a7c4cb59b1d9fc5fc48e7" gracePeriod=2 Nov 25 10:22:14 crc kubenswrapper[4932]: I1125 10:22:14.880653 4932 generic.go:334] "Generic (PLEG): container finished" podID="f8ab5797-80db-40c5-96e7-b04464e39294" containerID="971f030a2ab3f3af9c5b0c6f251a11332ec7e952101a7c4cb59b1d9fc5fc48e7" exitCode=0 Nov 25 10:22:14 crc kubenswrapper[4932]: I1125 10:22:14.880719 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grlj" event={"ID":"f8ab5797-80db-40c5-96e7-b04464e39294","Type":"ContainerDied","Data":"971f030a2ab3f3af9c5b0c6f251a11332ec7e952101a7c4cb59b1d9fc5fc48e7"} Nov 25 10:22:14 crc kubenswrapper[4932]: I1125 10:22:14.880759 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grlj" event={"ID":"f8ab5797-80db-40c5-96e7-b04464e39294","Type":"ContainerDied","Data":"e1bdea68f579f6b344146dffde23fc531be1176a470a963d28d8fd6958ad467d"} Nov 25 10:22:14 crc kubenswrapper[4932]: I1125 10:22:14.880777 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e1bdea68f579f6b344146dffde23fc531be1176a470a963d28d8fd6958ad467d" Nov 25 10:22:14 crc kubenswrapper[4932]: I1125 10:22:14.913999 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:15 crc kubenswrapper[4932]: I1125 10:22:15.055230 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8ab5797-80db-40c5-96e7-b04464e39294-utilities\") pod \"f8ab5797-80db-40c5-96e7-b04464e39294\" (UID: \"f8ab5797-80db-40c5-96e7-b04464e39294\") " Nov 25 10:22:15 crc kubenswrapper[4932]: I1125 10:22:15.055331 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8ab5797-80db-40c5-96e7-b04464e39294-catalog-content\") pod \"f8ab5797-80db-40c5-96e7-b04464e39294\" (UID: \"f8ab5797-80db-40c5-96e7-b04464e39294\") " Nov 25 10:22:15 crc kubenswrapper[4932]: I1125 10:22:15.055372 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zxvl\" (UniqueName: \"kubernetes.io/projected/f8ab5797-80db-40c5-96e7-b04464e39294-kube-api-access-4zxvl\") pod \"f8ab5797-80db-40c5-96e7-b04464e39294\" (UID: \"f8ab5797-80db-40c5-96e7-b04464e39294\") " Nov 25 10:22:15 crc kubenswrapper[4932]: I1125 10:22:15.061430 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8ab5797-80db-40c5-96e7-b04464e39294-kube-api-access-4zxvl" (OuterVolumeSpecName: "kube-api-access-4zxvl") pod "f8ab5797-80db-40c5-96e7-b04464e39294" (UID: "f8ab5797-80db-40c5-96e7-b04464e39294"). InnerVolumeSpecName "kube-api-access-4zxvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:22:15 crc kubenswrapper[4932]: I1125 10:22:15.064328 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8ab5797-80db-40c5-96e7-b04464e39294-utilities" (OuterVolumeSpecName: "utilities") pod "f8ab5797-80db-40c5-96e7-b04464e39294" (UID: "f8ab5797-80db-40c5-96e7-b04464e39294"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:22:15 crc kubenswrapper[4932]: I1125 10:22:15.079715 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8ab5797-80db-40c5-96e7-b04464e39294-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f8ab5797-80db-40c5-96e7-b04464e39294" (UID: "f8ab5797-80db-40c5-96e7-b04464e39294"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:22:15 crc kubenswrapper[4932]: I1125 10:22:15.158758 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8ab5797-80db-40c5-96e7-b04464e39294-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:15 crc kubenswrapper[4932]: I1125 10:22:15.158833 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8ab5797-80db-40c5-96e7-b04464e39294-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:15 crc kubenswrapper[4932]: I1125 10:22:15.158855 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zxvl\" (UniqueName: \"kubernetes.io/projected/f8ab5797-80db-40c5-96e7-b04464e39294-kube-api-access-4zxvl\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:15 crc kubenswrapper[4932]: I1125 10:22:15.889847 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5grlj" Nov 25 10:22:15 crc kubenswrapper[4932]: I1125 10:22:15.926023 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5grlj"] Nov 25 10:22:15 crc kubenswrapper[4932]: I1125 10:22:15.933978 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5grlj"] Nov 25 10:22:16 crc kubenswrapper[4932]: I1125 10:22:16.615951 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8ab5797-80db-40c5-96e7-b04464e39294" path="/var/lib/kubelet/pods/f8ab5797-80db-40c5-96e7-b04464e39294/volumes" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.311980 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-j7mmg"] Nov 25 10:22:17 crc kubenswrapper[4932]: E1125 10:22:17.312410 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8ab5797-80db-40c5-96e7-b04464e39294" containerName="extract-content" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.312430 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8ab5797-80db-40c5-96e7-b04464e39294" containerName="extract-content" Nov 25 10:22:17 crc kubenswrapper[4932]: E1125 10:22:17.312449 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8ab5797-80db-40c5-96e7-b04464e39294" containerName="extract-utilities" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.312457 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8ab5797-80db-40c5-96e7-b04464e39294" containerName="extract-utilities" Nov 25 10:22:17 crc kubenswrapper[4932]: E1125 10:22:17.312510 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8ab5797-80db-40c5-96e7-b04464e39294" containerName="registry-server" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.312519 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8ab5797-80db-40c5-96e7-b04464e39294" containerName="registry-server" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.312716 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8ab5797-80db-40c5-96e7-b04464e39294" containerName="registry-server" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.313456 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-j7mmg" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.325740 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-j7mmg"] Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.408785 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-a602-account-create-qjgw7"] Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.409814 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-a602-account-create-qjgw7" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.412388 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.416052 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4rv7\" (UniqueName: \"kubernetes.io/projected/3804b75a-8646-4475-8abb-a6f1ffe8a589-kube-api-access-f4rv7\") pod \"barbican-db-create-j7mmg\" (UID: \"3804b75a-8646-4475-8abb-a6f1ffe8a589\") " pod="openstack/barbican-db-create-j7mmg" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.416230 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3804b75a-8646-4475-8abb-a6f1ffe8a589-operator-scripts\") pod \"barbican-db-create-j7mmg\" (UID: \"3804b75a-8646-4475-8abb-a6f1ffe8a589\") " pod="openstack/barbican-db-create-j7mmg" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.420222 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-a602-account-create-qjgw7"] Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.518560 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4rv7\" (UniqueName: \"kubernetes.io/projected/3804b75a-8646-4475-8abb-a6f1ffe8a589-kube-api-access-f4rv7\") pod \"barbican-db-create-j7mmg\" (UID: \"3804b75a-8646-4475-8abb-a6f1ffe8a589\") " pod="openstack/barbican-db-create-j7mmg" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.518638 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11d0a49f-4bc8-4c66-9789-02c65071777f-operator-scripts\") pod \"barbican-a602-account-create-qjgw7\" (UID: \"11d0a49f-4bc8-4c66-9789-02c65071777f\") " pod="openstack/barbican-a602-account-create-qjgw7" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.518694 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3804b75a-8646-4475-8abb-a6f1ffe8a589-operator-scripts\") pod \"barbican-db-create-j7mmg\" (UID: \"3804b75a-8646-4475-8abb-a6f1ffe8a589\") " pod="openstack/barbican-db-create-j7mmg" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.518732 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5ng4\" (UniqueName: \"kubernetes.io/projected/11d0a49f-4bc8-4c66-9789-02c65071777f-kube-api-access-x5ng4\") pod \"barbican-a602-account-create-qjgw7\" (UID: \"11d0a49f-4bc8-4c66-9789-02c65071777f\") " pod="openstack/barbican-a602-account-create-qjgw7" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.519500 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3804b75a-8646-4475-8abb-a6f1ffe8a589-operator-scripts\") pod \"barbican-db-create-j7mmg\" (UID: \"3804b75a-8646-4475-8abb-a6f1ffe8a589\") " pod="openstack/barbican-db-create-j7mmg" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.541011 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4rv7\" (UniqueName: \"kubernetes.io/projected/3804b75a-8646-4475-8abb-a6f1ffe8a589-kube-api-access-f4rv7\") pod \"barbican-db-create-j7mmg\" (UID: 
\"3804b75a-8646-4475-8abb-a6f1ffe8a589\") " pod="openstack/barbican-db-create-j7mmg" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.620132 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5ng4\" (UniqueName: \"kubernetes.io/projected/11d0a49f-4bc8-4c66-9789-02c65071777f-kube-api-access-x5ng4\") pod \"barbican-a602-account-create-qjgw7\" (UID: \"11d0a49f-4bc8-4c66-9789-02c65071777f\") " pod="openstack/barbican-a602-account-create-qjgw7" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.620604 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11d0a49f-4bc8-4c66-9789-02c65071777f-operator-scripts\") pod \"barbican-a602-account-create-qjgw7\" (UID: \"11d0a49f-4bc8-4c66-9789-02c65071777f\") " pod="openstack/barbican-a602-account-create-qjgw7" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.621390 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11d0a49f-4bc8-4c66-9789-02c65071777f-operator-scripts\") pod \"barbican-a602-account-create-qjgw7\" (UID: \"11d0a49f-4bc8-4c66-9789-02c65071777f\") " pod="openstack/barbican-a602-account-create-qjgw7" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.634147 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-j7mmg" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.638379 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5ng4\" (UniqueName: \"kubernetes.io/projected/11d0a49f-4bc8-4c66-9789-02c65071777f-kube-api-access-x5ng4\") pod \"barbican-a602-account-create-qjgw7\" (UID: \"11d0a49f-4bc8-4c66-9789-02c65071777f\") " pod="openstack/barbican-a602-account-create-qjgw7" Nov 25 10:22:17 crc kubenswrapper[4932]: I1125 10:22:17.727789 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-a602-account-create-qjgw7" Nov 25 10:22:18 crc kubenswrapper[4932]: I1125 10:22:18.065467 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-j7mmg"] Nov 25 10:22:18 crc kubenswrapper[4932]: I1125 10:22:18.162980 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-a602-account-create-qjgw7"] Nov 25 10:22:18 crc kubenswrapper[4932]: I1125 10:22:18.917711 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a602-account-create-qjgw7" event={"ID":"11d0a49f-4bc8-4c66-9789-02c65071777f","Type":"ContainerStarted","Data":"55cf75288f2c2f60f78d5895a70e836a0e5cf8189a31b224d621844e141bab94"} Nov 25 10:22:18 crc kubenswrapper[4932]: I1125 10:22:18.918069 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a602-account-create-qjgw7" event={"ID":"11d0a49f-4bc8-4c66-9789-02c65071777f","Type":"ContainerStarted","Data":"dc27736eaad1ba886ddbf4013236613856201fd6a9030958ad07660f55523e5e"} Nov 25 10:22:18 crc kubenswrapper[4932]: I1125 10:22:18.919779 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-j7mmg" event={"ID":"3804b75a-8646-4475-8abb-a6f1ffe8a589","Type":"ContainerStarted","Data":"b5d5627eee4ab8dfb81479b2558194c26b712b5720f96f118b35730869ce76c0"} Nov 25 10:22:18 crc kubenswrapper[4932]: I1125 10:22:18.919842 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-j7mmg" event={"ID":"3804b75a-8646-4475-8abb-a6f1ffe8a589","Type":"ContainerStarted","Data":"ffe15badb8a488f53b84781794b9b116369d5db7611c1895a719df0e8bb8f82d"} Nov 25 10:22:18 crc kubenswrapper[4932]: I1125 10:22:18.935402 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-a602-account-create-qjgw7" podStartSLOduration=1.935370873 podStartE2EDuration="1.935370873s" podCreationTimestamp="2025-11-25 10:22:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:22:18.932889922 +0000 UTC m=+5599.058919495" watchObservedRunningTime="2025-11-25 10:22:18.935370873 +0000 UTC m=+5599.061400486" Nov 25 10:22:18 crc kubenswrapper[4932]: I1125 10:22:18.957854 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-create-j7mmg" podStartSLOduration=1.957825856 podStartE2EDuration="1.957825856s" podCreationTimestamp="2025-11-25 10:22:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:22:18.946846262 +0000 UTC m=+5599.072875835" watchObservedRunningTime="2025-11-25 10:22:18.957825856 +0000 UTC m=+5599.083855439" Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.482301 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4nxgq"] Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.484303 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.496795 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4nxgq"] Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.654556 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/777a2478-5ec1-409a-97e8-1673c8bb7c93-catalog-content\") pod \"community-operators-4nxgq\" (UID: \"777a2478-5ec1-409a-97e8-1673c8bb7c93\") " pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.654631 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/777a2478-5ec1-409a-97e8-1673c8bb7c93-utilities\") pod \"community-operators-4nxgq\" (UID: \"777a2478-5ec1-409a-97e8-1673c8bb7c93\") " pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.654663 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cl8v\" (UniqueName: \"kubernetes.io/projected/777a2478-5ec1-409a-97e8-1673c8bb7c93-kube-api-access-2cl8v\") pod \"community-operators-4nxgq\" (UID: \"777a2478-5ec1-409a-97e8-1673c8bb7c93\") " pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.756175 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/777a2478-5ec1-409a-97e8-1673c8bb7c93-catalog-content\") pod \"community-operators-4nxgq\" (UID: \"777a2478-5ec1-409a-97e8-1673c8bb7c93\") " pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.756305 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/777a2478-5ec1-409a-97e8-1673c8bb7c93-utilities\") pod \"community-operators-4nxgq\" (UID: \"777a2478-5ec1-409a-97e8-1673c8bb7c93\") " pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.756342 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cl8v\" (UniqueName: \"kubernetes.io/projected/777a2478-5ec1-409a-97e8-1673c8bb7c93-kube-api-access-2cl8v\") pod \"community-operators-4nxgq\" (UID: \"777a2478-5ec1-409a-97e8-1673c8bb7c93\") " pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.757472 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/777a2478-5ec1-409a-97e8-1673c8bb7c93-catalog-content\") pod \"community-operators-4nxgq\" (UID: \"777a2478-5ec1-409a-97e8-1673c8bb7c93\") " pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.758058 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/777a2478-5ec1-409a-97e8-1673c8bb7c93-utilities\") pod \"community-operators-4nxgq\" (UID: \"777a2478-5ec1-409a-97e8-1673c8bb7c93\") " pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.788129 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2cl8v\" (UniqueName: \"kubernetes.io/projected/777a2478-5ec1-409a-97e8-1673c8bb7c93-kube-api-access-2cl8v\") pod \"community-operators-4nxgq\" (UID: \"777a2478-5ec1-409a-97e8-1673c8bb7c93\") " pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.805435 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.944895 4932 generic.go:334] "Generic (PLEG): container finished" podID="11d0a49f-4bc8-4c66-9789-02c65071777f" containerID="55cf75288f2c2f60f78d5895a70e836a0e5cf8189a31b224d621844e141bab94" exitCode=0 Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.945196 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a602-account-create-qjgw7" event={"ID":"11d0a49f-4bc8-4c66-9789-02c65071777f","Type":"ContainerDied","Data":"55cf75288f2c2f60f78d5895a70e836a0e5cf8189a31b224d621844e141bab94"} Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.952234 4932 generic.go:334] "Generic (PLEG): container finished" podID="3804b75a-8646-4475-8abb-a6f1ffe8a589" containerID="b5d5627eee4ab8dfb81479b2558194c26b712b5720f96f118b35730869ce76c0" exitCode=0 Nov 25 10:22:19 crc kubenswrapper[4932]: I1125 10:22:19.952268 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-j7mmg" event={"ID":"3804b75a-8646-4475-8abb-a6f1ffe8a589","Type":"ContainerDied","Data":"b5d5627eee4ab8dfb81479b2558194c26b712b5720f96f118b35730869ce76c0"} Nov 25 10:22:20 crc kubenswrapper[4932]: I1125 10:22:20.303665 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4nxgq"] Nov 25 10:22:20 crc kubenswrapper[4932]: I1125 10:22:20.962775 4932 generic.go:334] "Generic (PLEG): container finished" podID="777a2478-5ec1-409a-97e8-1673c8bb7c93" containerID="983e21eb5cc8fe2f2e9ec0879b504bdbe32f7c3d83a7891e6839c62d1b1fb464" exitCode=0 Nov 25 10:22:20 crc kubenswrapper[4932]: I1125 10:22:20.962878 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nxgq" event={"ID":"777a2478-5ec1-409a-97e8-1673c8bb7c93","Type":"ContainerDied","Data":"983e21eb5cc8fe2f2e9ec0879b504bdbe32f7c3d83a7891e6839c62d1b1fb464"} Nov 25 10:22:20 crc kubenswrapper[4932]: I1125 10:22:20.963121 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nxgq" event={"ID":"777a2478-5ec1-409a-97e8-1673c8bb7c93","Type":"ContainerStarted","Data":"901c4769bc0148ca70396d62c2bcedc52305672332a9a8ce1178300f486b16bf"} Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.307677 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-j7mmg" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.315034 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-a602-account-create-qjgw7" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.388866 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4rv7\" (UniqueName: \"kubernetes.io/projected/3804b75a-8646-4475-8abb-a6f1ffe8a589-kube-api-access-f4rv7\") pod \"3804b75a-8646-4475-8abb-a6f1ffe8a589\" (UID: \"3804b75a-8646-4475-8abb-a6f1ffe8a589\") " Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.389082 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3804b75a-8646-4475-8abb-a6f1ffe8a589-operator-scripts\") pod \"3804b75a-8646-4475-8abb-a6f1ffe8a589\" (UID: \"3804b75a-8646-4475-8abb-a6f1ffe8a589\") " Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.389872 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3804b75a-8646-4475-8abb-a6f1ffe8a589-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3804b75a-8646-4475-8abb-a6f1ffe8a589" (UID: "3804b75a-8646-4475-8abb-a6f1ffe8a589"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.393914 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3804b75a-8646-4475-8abb-a6f1ffe8a589-kube-api-access-f4rv7" (OuterVolumeSpecName: "kube-api-access-f4rv7") pod "3804b75a-8646-4475-8abb-a6f1ffe8a589" (UID: "3804b75a-8646-4475-8abb-a6f1ffe8a589"). InnerVolumeSpecName "kube-api-access-f4rv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.490265 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5ng4\" (UniqueName: \"kubernetes.io/projected/11d0a49f-4bc8-4c66-9789-02c65071777f-kube-api-access-x5ng4\") pod \"11d0a49f-4bc8-4c66-9789-02c65071777f\" (UID: \"11d0a49f-4bc8-4c66-9789-02c65071777f\") " Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.490365 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11d0a49f-4bc8-4c66-9789-02c65071777f-operator-scripts\") pod \"11d0a49f-4bc8-4c66-9789-02c65071777f\" (UID: \"11d0a49f-4bc8-4c66-9789-02c65071777f\") " Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.490820 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4rv7\" (UniqueName: \"kubernetes.io/projected/3804b75a-8646-4475-8abb-a6f1ffe8a589-kube-api-access-f4rv7\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.490844 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3804b75a-8646-4475-8abb-a6f1ffe8a589-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.490855 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/11d0a49f-4bc8-4c66-9789-02c65071777f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "11d0a49f-4bc8-4c66-9789-02c65071777f" (UID: "11d0a49f-4bc8-4c66-9789-02c65071777f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.493250 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11d0a49f-4bc8-4c66-9789-02c65071777f-kube-api-access-x5ng4" (OuterVolumeSpecName: "kube-api-access-x5ng4") pod "11d0a49f-4bc8-4c66-9789-02c65071777f" (UID: "11d0a49f-4bc8-4c66-9789-02c65071777f"). InnerVolumeSpecName "kube-api-access-x5ng4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.592043 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/11d0a49f-4bc8-4c66-9789-02c65071777f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.592078 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5ng4\" (UniqueName: \"kubernetes.io/projected/11d0a49f-4bc8-4c66-9789-02c65071777f-kube-api-access-x5ng4\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.982148 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-j7mmg" event={"ID":"3804b75a-8646-4475-8abb-a6f1ffe8a589","Type":"ContainerDied","Data":"ffe15badb8a488f53b84781794b9b116369d5db7611c1895a719df0e8bb8f82d"} Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.982199 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ffe15badb8a488f53b84781794b9b116369d5db7611c1895a719df0e8bb8f82d" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.982199 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-j7mmg" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.987832 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a602-account-create-qjgw7" event={"ID":"11d0a49f-4bc8-4c66-9789-02c65071777f","Type":"ContainerDied","Data":"dc27736eaad1ba886ddbf4013236613856201fd6a9030958ad07660f55523e5e"} Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.987865 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc27736eaad1ba886ddbf4013236613856201fd6a9030958ad07660f55523e5e" Nov 25 10:22:21 crc kubenswrapper[4932]: I1125 10:22:21.987919 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-a602-account-create-qjgw7" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.692509 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-n9d9q"] Nov 25 10:22:22 crc kubenswrapper[4932]: E1125 10:22:22.693100 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3804b75a-8646-4475-8abb-a6f1ffe8a589" containerName="mariadb-database-create" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.693120 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3804b75a-8646-4475-8abb-a6f1ffe8a589" containerName="mariadb-database-create" Nov 25 10:22:22 crc kubenswrapper[4932]: E1125 10:22:22.693152 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11d0a49f-4bc8-4c66-9789-02c65071777f" containerName="mariadb-account-create" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.693160 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="11d0a49f-4bc8-4c66-9789-02c65071777f" containerName="mariadb-account-create" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.693359 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="3804b75a-8646-4475-8abb-a6f1ffe8a589" containerName="mariadb-database-create" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.693387 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="11d0a49f-4bc8-4c66-9789-02c65071777f" containerName="mariadb-account-create" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.694035 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.696033 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.696364 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-7lfkp" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.701543 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-n9d9q"] Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.817528 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sszww\" (UniqueName: \"kubernetes.io/projected/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-kube-api-access-sszww\") pod \"barbican-db-sync-n9d9q\" (UID: \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\") " pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.817598 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-combined-ca-bundle\") pod \"barbican-db-sync-n9d9q\" (UID: \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\") " pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.817683 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-db-sync-config-data\") pod \"barbican-db-sync-n9d9q\" (UID: \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\") " pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.918993 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sszww\" (UniqueName: 
\"kubernetes.io/projected/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-kube-api-access-sszww\") pod \"barbican-db-sync-n9d9q\" (UID: \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\") " pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.919031 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-combined-ca-bundle\") pod \"barbican-db-sync-n9d9q\" (UID: \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\") " pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.919080 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-db-sync-config-data\") pod \"barbican-db-sync-n9d9q\" (UID: \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\") " pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.923109 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-combined-ca-bundle\") pod \"barbican-db-sync-n9d9q\" (UID: \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\") " pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.931944 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-db-sync-config-data\") pod \"barbican-db-sync-n9d9q\" (UID: \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\") " pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.937090 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sszww\" (UniqueName: \"kubernetes.io/projected/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-kube-api-access-sszww\") pod \"barbican-db-sync-n9d9q\" (UID: \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\") " pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:22 crc kubenswrapper[4932]: I1125 10:22:22.998644 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nxgq" event={"ID":"777a2478-5ec1-409a-97e8-1673c8bb7c93","Type":"ContainerStarted","Data":"26a8a6d9c4a38f2ddd84deb65a5b9bfd95083e69ed73e4b61d45748771b07264"} Nov 25 10:22:23 crc kubenswrapper[4932]: I1125 10:22:23.071479 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:23 crc kubenswrapper[4932]: I1125 10:22:23.581966 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-n9d9q"] Nov 25 10:22:23 crc kubenswrapper[4932]: W1125 10:22:23.587932 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa464407_8a3b_4d8f_b2ff_9b24a8a523bd.slice/crio-93af7f1de958e9508f311d5259228b4185b9c37c03ccf5087985d4cde9dfab6b WatchSource:0}: Error finding container 93af7f1de958e9508f311d5259228b4185b9c37c03ccf5087985d4cde9dfab6b: Status 404 returned error can't find the container with id 93af7f1de958e9508f311d5259228b4185b9c37c03ccf5087985d4cde9dfab6b Nov 25 10:22:24 crc kubenswrapper[4932]: I1125 10:22:24.012590 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-n9d9q" event={"ID":"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd","Type":"ContainerStarted","Data":"b4fb770e7260d9edc09bde16771161de58218ed6224fbd67095b20946055b9f7"} Nov 25 10:22:24 crc kubenswrapper[4932]: I1125 10:22:24.012938 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-n9d9q" event={"ID":"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd","Type":"ContainerStarted","Data":"93af7f1de958e9508f311d5259228b4185b9c37c03ccf5087985d4cde9dfab6b"} Nov 25 10:22:24 crc kubenswrapper[4932]: I1125 10:22:24.018588 4932 generic.go:334] "Generic (PLEG): container finished" podID="777a2478-5ec1-409a-97e8-1673c8bb7c93" containerID="26a8a6d9c4a38f2ddd84deb65a5b9bfd95083e69ed73e4b61d45748771b07264" exitCode=0 Nov 25 10:22:24 crc kubenswrapper[4932]: I1125 10:22:24.018629 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nxgq" event={"ID":"777a2478-5ec1-409a-97e8-1673c8bb7c93","Type":"ContainerDied","Data":"26a8a6d9c4a38f2ddd84deb65a5b9bfd95083e69ed73e4b61d45748771b07264"} Nov 25 10:22:24 crc kubenswrapper[4932]: I1125 10:22:24.036027 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-n9d9q" podStartSLOduration=2.036007254 podStartE2EDuration="2.036007254s" podCreationTimestamp="2025-11-25 10:22:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:22:24.033231244 +0000 UTC m=+5604.159260817" watchObservedRunningTime="2025-11-25 10:22:24.036007254 +0000 UTC m=+5604.162036807" Nov 25 10:22:28 crc kubenswrapper[4932]: I1125 10:22:28.051586 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nxgq" event={"ID":"777a2478-5ec1-409a-97e8-1673c8bb7c93","Type":"ContainerStarted","Data":"07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b"} Nov 25 10:22:28 crc kubenswrapper[4932]: I1125 10:22:28.075539 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4nxgq" podStartSLOduration=3.133927239 podStartE2EDuration="9.075517058s" podCreationTimestamp="2025-11-25 10:22:19 +0000 UTC" firstStartedPulling="2025-11-25 10:22:20.971088457 +0000 UTC m=+5601.097118020" lastFinishedPulling="2025-11-25 10:22:26.912678266 +0000 UTC m=+5607.038707839" observedRunningTime="2025-11-25 10:22:28.067969912 +0000 UTC m=+5608.193999475" watchObservedRunningTime="2025-11-25 10:22:28.075517058 +0000 UTC m=+5608.201546621" Nov 25 10:22:29 crc kubenswrapper[4932]: I1125 10:22:29.805993 4932 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:29 crc kubenswrapper[4932]: I1125 10:22:29.806436 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:29 crc kubenswrapper[4932]: I1125 10:22:29.850317 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:38 crc kubenswrapper[4932]: I1125 10:22:38.140209 4932 generic.go:334] "Generic (PLEG): container finished" podID="fa464407-8a3b-4d8f-b2ff-9b24a8a523bd" containerID="b4fb770e7260d9edc09bde16771161de58218ed6224fbd67095b20946055b9f7" exitCode=0 Nov 25 10:22:38 crc kubenswrapper[4932]: I1125 10:22:38.140303 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-n9d9q" event={"ID":"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd","Type":"ContainerDied","Data":"b4fb770e7260d9edc09bde16771161de58218ed6224fbd67095b20946055b9f7"} Nov 25 10:22:39 crc kubenswrapper[4932]: I1125 10:22:39.522895 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:39 crc kubenswrapper[4932]: I1125 10:22:39.629710 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-db-sync-config-data\") pod \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\" (UID: \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\") " Nov 25 10:22:39 crc kubenswrapper[4932]: I1125 10:22:39.629946 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sszww\" (UniqueName: \"kubernetes.io/projected/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-kube-api-access-sszww\") pod \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\" (UID: \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\") " Nov 25 10:22:39 crc kubenswrapper[4932]: I1125 10:22:39.629993 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-combined-ca-bundle\") pod \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\" (UID: \"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd\") " Nov 25 10:22:39 crc kubenswrapper[4932]: I1125 10:22:39.635311 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "fa464407-8a3b-4d8f-b2ff-9b24a8a523bd" (UID: "fa464407-8a3b-4d8f-b2ff-9b24a8a523bd"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:39 crc kubenswrapper[4932]: I1125 10:22:39.635379 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-kube-api-access-sszww" (OuterVolumeSpecName: "kube-api-access-sszww") pod "fa464407-8a3b-4d8f-b2ff-9b24a8a523bd" (UID: "fa464407-8a3b-4d8f-b2ff-9b24a8a523bd"). InnerVolumeSpecName "kube-api-access-sszww". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:22:39 crc kubenswrapper[4932]: I1125 10:22:39.659971 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fa464407-8a3b-4d8f-b2ff-9b24a8a523bd" (UID: "fa464407-8a3b-4d8f-b2ff-9b24a8a523bd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:39 crc kubenswrapper[4932]: I1125 10:22:39.732226 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sszww\" (UniqueName: \"kubernetes.io/projected/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-kube-api-access-sszww\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:39 crc kubenswrapper[4932]: I1125 10:22:39.732266 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:39 crc kubenswrapper[4932]: I1125 10:22:39.732278 4932 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:39 crc kubenswrapper[4932]: I1125 10:22:39.849407 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:39 crc kubenswrapper[4932]: I1125 10:22:39.896307 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4nxgq"] Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.162824 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-n9d9q" event={"ID":"fa464407-8a3b-4d8f-b2ff-9b24a8a523bd","Type":"ContainerDied","Data":"93af7f1de958e9508f311d5259228b4185b9c37c03ccf5087985d4cde9dfab6b"} Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.162851 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-n9d9q" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.162869 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93af7f1de958e9508f311d5259228b4185b9c37c03ccf5087985d4cde9dfab6b" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.162984 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4nxgq" podUID="777a2478-5ec1-409a-97e8-1673c8bb7c93" containerName="registry-server" containerID="cri-o://07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b" gracePeriod=2 Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.488715 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-77b96559f5-mgnmp"] Nov 25 10:22:40 crc kubenswrapper[4932]: E1125 10:22:40.489954 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa464407-8a3b-4d8f-b2ff-9b24a8a523bd" containerName="barbican-db-sync" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.489984 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa464407-8a3b-4d8f-b2ff-9b24a8a523bd" containerName="barbican-db-sync" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.490754 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa464407-8a3b-4d8f-b2ff-9b24a8a523bd" containerName="barbican-db-sync" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.492491 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.503038 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-77b96559f5-mgnmp"] Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.513412 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.513520 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-7lfkp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.513661 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.540240 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx"] Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.551648 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-d8667c859-265pc"] Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.553271 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.553851 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.557257 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03d62bf1-a859-47ec-8302-6e967081793a-config-data-custom\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.557373 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sblg\" (UniqueName: \"kubernetes.io/projected/03d62bf1-a859-47ec-8302-6e967081793a-kube-api-access-8sblg\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.557460 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03d62bf1-a859-47ec-8302-6e967081793a-config-data\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.557548 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03d62bf1-a859-47ec-8302-6e967081793a-logs\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.557572 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03d62bf1-a859-47ec-8302-6e967081793a-combined-ca-bundle\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.557602 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.573252 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx"] Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.583428 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d8667c859-265pc"] Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659120 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03d62bf1-a859-47ec-8302-6e967081793a-config-data-custom\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659242 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-dns-svc\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659265 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sblg\" (UniqueName: \"kubernetes.io/projected/03d62bf1-a859-47ec-8302-6e967081793a-kube-api-access-8sblg\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659283 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-config-data-custom\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659327 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-logs\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659381 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-ovsdbserver-nb\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659397 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-config\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659429 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03d62bf1-a859-47ec-8302-6e967081793a-config-data\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659456 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-ovsdbserver-sb\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659483 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgfpm\" (UniqueName: \"kubernetes.io/projected/cb12d6e6-7da1-4090-9a49-d731c142d257-kube-api-access-sgfpm\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659509 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-config-data\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: 
\"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659526 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-combined-ca-bundle\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659550 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb2zr\" (UniqueName: \"kubernetes.io/projected/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-kube-api-access-qb2zr\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659572 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03d62bf1-a859-47ec-8302-6e967081793a-logs\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.659589 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03d62bf1-a859-47ec-8302-6e967081793a-combined-ca-bundle\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.663281 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-9f97bdc5d-sxm8c"] Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.664689 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03d62bf1-a859-47ec-8302-6e967081793a-logs\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.670702 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.671616 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03d62bf1-a859-47ec-8302-6e967081793a-config-data-custom\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.677449 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-9f97bdc5d-sxm8c"] Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.685600 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.688384 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03d62bf1-a859-47ec-8302-6e967081793a-config-data\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.693889 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03d62bf1-a859-47ec-8302-6e967081793a-combined-ca-bundle\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.701453 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8sblg\" (UniqueName: \"kubernetes.io/projected/03d62bf1-a859-47ec-8302-6e967081793a-kube-api-access-8sblg\") pod \"barbican-worker-77b96559f5-mgnmp\" (UID: \"03d62bf1-a859-47ec-8302-6e967081793a\") " pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.761770 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-ovsdbserver-nb\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.761825 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-config\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.761876 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-ovsdbserver-sb\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.761904 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-config-data\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc 
kubenswrapper[4932]: I1125 10:22:40.761942 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgfpm\" (UniqueName: \"kubernetes.io/projected/cb12d6e6-7da1-4090-9a49-d731c142d257-kube-api-access-sgfpm\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.761972 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-config-data\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.761998 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-combined-ca-bundle\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.762032 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb2zr\" (UniqueName: \"kubernetes.io/projected/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-kube-api-access-qb2zr\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.762067 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9dfc\" (UniqueName: \"kubernetes.io/projected/556da26d-40db-42ce-85fe-96501eee7604-kube-api-access-h9dfc\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.762103 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-combined-ca-bundle\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.762154 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-config-data-custom\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.762206 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/556da26d-40db-42ce-85fe-96501eee7604-logs\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.762252 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-dns-svc\") pod 
\"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.762279 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-config-data-custom\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.762311 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-logs\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.763182 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-logs\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.763845 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-ovsdbserver-sb\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.764004 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-dns-svc\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.764546 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-ovsdbserver-nb\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.765044 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-config\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.772472 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-config-data\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.772959 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-combined-ca-bundle\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: 
\"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.785946 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-config-data-custom\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.788645 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sgfpm\" (UniqueName: \"kubernetes.io/projected/cb12d6e6-7da1-4090-9a49-d731c142d257-kube-api-access-sgfpm\") pod \"dnsmasq-dns-d8667c859-265pc\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.791620 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qb2zr\" (UniqueName: \"kubernetes.io/projected/3a6c25ec-8a61-44cb-a637-8a6130aac7a4-kube-api-access-qb2zr\") pod \"barbican-keystone-listener-f7ffdd4f4-d9kpx\" (UID: \"3a6c25ec-8a61-44cb-a637-8a6130aac7a4\") " pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.858673 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-77b96559f5-mgnmp" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.868113 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-config-data\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.868200 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9dfc\" (UniqueName: \"kubernetes.io/projected/556da26d-40db-42ce-85fe-96501eee7604-kube-api-access-h9dfc\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.868232 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-combined-ca-bundle\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.868273 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-config-data-custom\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.868295 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/556da26d-40db-42ce-85fe-96501eee7604-logs\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.868902 4932 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/556da26d-40db-42ce-85fe-96501eee7604-logs\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.872478 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-combined-ca-bundle\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.873405 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.874398 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-config-data\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.876527 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-config-data-custom\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.884629 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.889063 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9dfc\" (UniqueName: \"kubernetes.io/projected/556da26d-40db-42ce-85fe-96501eee7604-kube-api-access-h9dfc\") pod \"barbican-api-9f97bdc5d-sxm8c\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.898040 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.969259 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/777a2478-5ec1-409a-97e8-1673c8bb7c93-catalog-content\") pod \"777a2478-5ec1-409a-97e8-1673c8bb7c93\" (UID: \"777a2478-5ec1-409a-97e8-1673c8bb7c93\") " Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.969320 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/777a2478-5ec1-409a-97e8-1673c8bb7c93-utilities\") pod \"777a2478-5ec1-409a-97e8-1673c8bb7c93\" (UID: \"777a2478-5ec1-409a-97e8-1673c8bb7c93\") " Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.969395 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cl8v\" (UniqueName: \"kubernetes.io/projected/777a2478-5ec1-409a-97e8-1673c8bb7c93-kube-api-access-2cl8v\") pod \"777a2478-5ec1-409a-97e8-1673c8bb7c93\" (UID: \"777a2478-5ec1-409a-97e8-1673c8bb7c93\") " Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.970486 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/777a2478-5ec1-409a-97e8-1673c8bb7c93-utilities" (OuterVolumeSpecName: "utilities") pod "777a2478-5ec1-409a-97e8-1673c8bb7c93" (UID: "777a2478-5ec1-409a-97e8-1673c8bb7c93"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:22:40 crc kubenswrapper[4932]: I1125 10:22:40.974624 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/777a2478-5ec1-409a-97e8-1673c8bb7c93-kube-api-access-2cl8v" (OuterVolumeSpecName: "kube-api-access-2cl8v") pod "777a2478-5ec1-409a-97e8-1673c8bb7c93" (UID: "777a2478-5ec1-409a-97e8-1673c8bb7c93"). InnerVolumeSpecName "kube-api-access-2cl8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.039662 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/777a2478-5ec1-409a-97e8-1673c8bb7c93-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "777a2478-5ec1-409a-97e8-1673c8bb7c93" (UID: "777a2478-5ec1-409a-97e8-1673c8bb7c93"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.071256 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cl8v\" (UniqueName: \"kubernetes.io/projected/777a2478-5ec1-409a-97e8-1673c8bb7c93-kube-api-access-2cl8v\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.071291 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/777a2478-5ec1-409a-97e8-1673c8bb7c93-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.071300 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/777a2478-5ec1-409a-97e8-1673c8bb7c93-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.155864 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.179671 4932 generic.go:334] "Generic (PLEG): container finished" podID="777a2478-5ec1-409a-97e8-1673c8bb7c93" containerID="07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b" exitCode=0 Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.179714 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nxgq" event={"ID":"777a2478-5ec1-409a-97e8-1673c8bb7c93","Type":"ContainerDied","Data":"07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b"} Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.179740 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4nxgq" event={"ID":"777a2478-5ec1-409a-97e8-1673c8bb7c93","Type":"ContainerDied","Data":"901c4769bc0148ca70396d62c2bcedc52305672332a9a8ce1178300f486b16bf"} Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.179758 4932 scope.go:117] "RemoveContainer" containerID="07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.179871 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4nxgq" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.243334 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4nxgq"] Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.255323 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4nxgq"] Nov 25 10:22:41 crc kubenswrapper[4932]: W1125 10:22:41.285156 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb12d6e6_7da1_4090_9a49_d731c142d257.slice/crio-d56388d1adf4f408ee9d2fe6f3c4b8f22f4cd21e737cf06e7fdf0deb28673e78 WatchSource:0}: Error finding container d56388d1adf4f408ee9d2fe6f3c4b8f22f4cd21e737cf06e7fdf0deb28673e78: Status 404 returned error can't find the container with id d56388d1adf4f408ee9d2fe6f3c4b8f22f4cd21e737cf06e7fdf0deb28673e78 Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.295521 4932 scope.go:117] "RemoveContainer" containerID="26a8a6d9c4a38f2ddd84deb65a5b9bfd95083e69ed73e4b61d45748771b07264" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.300420 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-d8667c859-265pc"] Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.310394 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx"] Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.335659 4932 scope.go:117] "RemoveContainer" containerID="983e21eb5cc8fe2f2e9ec0879b504bdbe32f7c3d83a7891e6839c62d1b1fb464" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.383823 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-77b96559f5-mgnmp"] Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.386029 4932 scope.go:117] "RemoveContainer" containerID="07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b" Nov 25 10:22:41 crc kubenswrapper[4932]: E1125 10:22:41.386854 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b\": container with 
ID starting with 07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b not found: ID does not exist" containerID="07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.386895 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b"} err="failed to get container status \"07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b\": rpc error: code = NotFound desc = could not find container \"07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b\": container with ID starting with 07210e38fea146d6abcd28d990ec55a8c16351749b058b36fab9bb9b984bf75b not found: ID does not exist" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.386920 4932 scope.go:117] "RemoveContainer" containerID="26a8a6d9c4a38f2ddd84deb65a5b9bfd95083e69ed73e4b61d45748771b07264" Nov 25 10:22:41 crc kubenswrapper[4932]: E1125 10:22:41.387229 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26a8a6d9c4a38f2ddd84deb65a5b9bfd95083e69ed73e4b61d45748771b07264\": container with ID starting with 26a8a6d9c4a38f2ddd84deb65a5b9bfd95083e69ed73e4b61d45748771b07264 not found: ID does not exist" containerID="26a8a6d9c4a38f2ddd84deb65a5b9bfd95083e69ed73e4b61d45748771b07264" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.387264 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26a8a6d9c4a38f2ddd84deb65a5b9bfd95083e69ed73e4b61d45748771b07264"} err="failed to get container status \"26a8a6d9c4a38f2ddd84deb65a5b9bfd95083e69ed73e4b61d45748771b07264\": rpc error: code = NotFound desc = could not find container \"26a8a6d9c4a38f2ddd84deb65a5b9bfd95083e69ed73e4b61d45748771b07264\": container with ID starting with 26a8a6d9c4a38f2ddd84deb65a5b9bfd95083e69ed73e4b61d45748771b07264 not found: ID does not exist" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.387285 4932 scope.go:117] "RemoveContainer" containerID="983e21eb5cc8fe2f2e9ec0879b504bdbe32f7c3d83a7891e6839c62d1b1fb464" Nov 25 10:22:41 crc kubenswrapper[4932]: E1125 10:22:41.387868 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"983e21eb5cc8fe2f2e9ec0879b504bdbe32f7c3d83a7891e6839c62d1b1fb464\": container with ID starting with 983e21eb5cc8fe2f2e9ec0879b504bdbe32f7c3d83a7891e6839c62d1b1fb464 not found: ID does not exist" containerID="983e21eb5cc8fe2f2e9ec0879b504bdbe32f7c3d83a7891e6839c62d1b1fb464" Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.387901 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"983e21eb5cc8fe2f2e9ec0879b504bdbe32f7c3d83a7891e6839c62d1b1fb464"} err="failed to get container status \"983e21eb5cc8fe2f2e9ec0879b504bdbe32f7c3d83a7891e6839c62d1b1fb464\": rpc error: code = NotFound desc = could not find container \"983e21eb5cc8fe2f2e9ec0879b504bdbe32f7c3d83a7891e6839c62d1b1fb464\": container with ID starting with 983e21eb5cc8fe2f2e9ec0879b504bdbe32f7c3d83a7891e6839c62d1b1fb464 not found: ID does not exist" Nov 25 10:22:41 crc kubenswrapper[4932]: W1125 10:22:41.435420 4932 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03d62bf1_a859_47ec_8302_6e967081793a.slice/crio-0039495ca7774e527ff0723c99319fef2f3de262ba0a0785ae7f9acef6cda2f8 WatchSource:0}: Error finding container 0039495ca7774e527ff0723c99319fef2f3de262ba0a0785ae7f9acef6cda2f8: Status 404 returned error can't find the container with id 0039495ca7774e527ff0723c99319fef2f3de262ba0a0785ae7f9acef6cda2f8 Nov 25 10:22:41 crc kubenswrapper[4932]: I1125 10:22:41.647971 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-9f97bdc5d-sxm8c"] Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.193462 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9f97bdc5d-sxm8c" event={"ID":"556da26d-40db-42ce-85fe-96501eee7604","Type":"ContainerStarted","Data":"f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3"} Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.193787 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9f97bdc5d-sxm8c" event={"ID":"556da26d-40db-42ce-85fe-96501eee7604","Type":"ContainerStarted","Data":"d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911"} Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.193807 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9f97bdc5d-sxm8c" event={"ID":"556da26d-40db-42ce-85fe-96501eee7604","Type":"ContainerStarted","Data":"999bdd9c91c786e6fd62dbd5d7de062cf375bc35abb01e4526d03a44d474ae00"} Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.196086 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" event={"ID":"3a6c25ec-8a61-44cb-a637-8a6130aac7a4","Type":"ContainerStarted","Data":"e509bb6b71bbd16fd32195c9c3e34141a46ab1c8f743a7672f4cae085b4c0b55"} Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.196148 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" event={"ID":"3a6c25ec-8a61-44cb-a637-8a6130aac7a4","Type":"ContainerStarted","Data":"2771200584ec7540279bc0c46e36943f5cd2b1cedb9423dbfe8138f6d80cd6b4"} Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.196165 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" event={"ID":"3a6c25ec-8a61-44cb-a637-8a6130aac7a4","Type":"ContainerStarted","Data":"4811096fbda4d8ce4ce23632ccbdd56c64c7b046ec9461fa0e9c3ab3a9376b6f"} Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.198675 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-77b96559f5-mgnmp" event={"ID":"03d62bf1-a859-47ec-8302-6e967081793a","Type":"ContainerStarted","Data":"e71a142a4a531c52bb3e9f90091a1eeaa815fb09fbe51fb703ec345b311193d8"} Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.198713 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-77b96559f5-mgnmp" event={"ID":"03d62bf1-a859-47ec-8302-6e967081793a","Type":"ContainerStarted","Data":"a2f8f359730d172e46410fd61c8661998f90bf6ef3a190a02b437faa9c78dabf"} Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.198723 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-77b96559f5-mgnmp" event={"ID":"03d62bf1-a859-47ec-8302-6e967081793a","Type":"ContainerStarted","Data":"0039495ca7774e527ff0723c99319fef2f3de262ba0a0785ae7f9acef6cda2f8"} Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.200647 4932 generic.go:334] 
"Generic (PLEG): container finished" podID="cb12d6e6-7da1-4090-9a49-d731c142d257" containerID="178a7bdc374b0c9d6505e923a29f696c9e65b7cac3d544a06adb894faccb94b6" exitCode=0 Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.200709 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d8667c859-265pc" event={"ID":"cb12d6e6-7da1-4090-9a49-d731c142d257","Type":"ContainerDied","Data":"178a7bdc374b0c9d6505e923a29f696c9e65b7cac3d544a06adb894faccb94b6"} Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.200729 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d8667c859-265pc" event={"ID":"cb12d6e6-7da1-4090-9a49-d731c142d257","Type":"ContainerStarted","Data":"d56388d1adf4f408ee9d2fe6f3c4b8f22f4cd21e737cf06e7fdf0deb28673e78"} Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.229168 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-f7ffdd4f4-d9kpx" podStartSLOduration=2.229135626 podStartE2EDuration="2.229135626s" podCreationTimestamp="2025-11-25 10:22:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:22:42.228364844 +0000 UTC m=+5622.354394407" watchObservedRunningTime="2025-11-25 10:22:42.229135626 +0000 UTC m=+5622.355165189" Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.263808 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-77b96559f5-mgnmp" podStartSLOduration=2.263782679 podStartE2EDuration="2.263782679s" podCreationTimestamp="2025-11-25 10:22:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:22:42.25755473 +0000 UTC m=+5622.383584283" watchObservedRunningTime="2025-11-25 10:22:42.263782679 +0000 UTC m=+5622.389812242" Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.632726 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="777a2478-5ec1-409a-97e8-1673c8bb7c93" path="/var/lib/kubelet/pods/777a2478-5ec1-409a-97e8-1673c8bb7c93/volumes" Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.880492 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6f4846cd84-7wgdx"] Nov 25 10:22:42 crc kubenswrapper[4932]: E1125 10:22:42.881239 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="777a2478-5ec1-409a-97e8-1673c8bb7c93" containerName="extract-utilities" Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.881258 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="777a2478-5ec1-409a-97e8-1673c8bb7c93" containerName="extract-utilities" Nov 25 10:22:42 crc kubenswrapper[4932]: E1125 10:22:42.881286 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="777a2478-5ec1-409a-97e8-1673c8bb7c93" containerName="extract-content" Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.881293 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="777a2478-5ec1-409a-97e8-1673c8bb7c93" containerName="extract-content" Nov 25 10:22:42 crc kubenswrapper[4932]: E1125 10:22:42.881314 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="777a2478-5ec1-409a-97e8-1673c8bb7c93" containerName="registry-server" Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.881323 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="777a2478-5ec1-409a-97e8-1673c8bb7c93" 
containerName="registry-server" Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.881523 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="777a2478-5ec1-409a-97e8-1673c8bb7c93" containerName="registry-server" Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.882637 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.886391 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.886643 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 10:22:42 crc kubenswrapper[4932]: I1125 10:22:42.892143 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6f4846cd84-7wgdx"] Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.014321 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-config-data\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.014414 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-public-tls-certs\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.014475 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0734c01f-574e-4f75-9c2f-77f57b292d28-logs\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.014530 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-internal-tls-certs\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.014567 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dz7dn\" (UniqueName: \"kubernetes.io/projected/0734c01f-574e-4f75-9c2f-77f57b292d28-kube-api-access-dz7dn\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.014663 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-config-data-custom\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.014714 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-combined-ca-bundle\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.116563 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-combined-ca-bundle\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.116622 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-config-data\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.116664 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-public-tls-certs\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.116698 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0734c01f-574e-4f75-9c2f-77f57b292d28-logs\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.116719 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-internal-tls-certs\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.116743 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dz7dn\" (UniqueName: \"kubernetes.io/projected/0734c01f-574e-4f75-9c2f-77f57b292d28-kube-api-access-dz7dn\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.116793 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-config-data-custom\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.117235 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0734c01f-574e-4f75-9c2f-77f57b292d28-logs\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.121983 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-internal-tls-certs\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.124011 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-config-data\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.124045 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-public-tls-certs\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.124247 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-combined-ca-bundle\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.124346 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0734c01f-574e-4f75-9c2f-77f57b292d28-config-data-custom\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.137568 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dz7dn\" (UniqueName: \"kubernetes.io/projected/0734c01f-574e-4f75-9c2f-77f57b292d28-kube-api-access-dz7dn\") pod \"barbican-api-6f4846cd84-7wgdx\" (UID: \"0734c01f-574e-4f75-9c2f-77f57b292d28\") " pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.212393 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.216288 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d8667c859-265pc" event={"ID":"cb12d6e6-7da1-4090-9a49-d731c142d257","Type":"ContainerStarted","Data":"e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775"} Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.244946 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-d8667c859-265pc" podStartSLOduration=3.244910563 podStartE2EDuration="3.244910563s" podCreationTimestamp="2025-11-25 10:22:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:22:43.237663106 +0000 UTC m=+5623.363692689" watchObservedRunningTime="2025-11-25 10:22:43.244910563 +0000 UTC m=+5623.370940126" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.265449 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-9f97bdc5d-sxm8c" podStartSLOduration=3.265427571 podStartE2EDuration="3.265427571s" podCreationTimestamp="2025-11-25 10:22:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:22:43.262264731 +0000 UTC m=+5623.388294304" watchObservedRunningTime="2025-11-25 10:22:43.265427571 +0000 UTC m=+5623.391457134" Nov 25 10:22:43 crc kubenswrapper[4932]: I1125 10:22:43.702141 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6f4846cd84-7wgdx"] Nov 25 10:22:43 crc kubenswrapper[4932]: W1125 10:22:43.710085 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0734c01f_574e_4f75_9c2f_77f57b292d28.slice/crio-0f2eb97d179ef512f2017723d5dbc85069327e5a64b1a7c3f820085d82cfb414 WatchSource:0}: Error finding container 0f2eb97d179ef512f2017723d5dbc85069327e5a64b1a7c3f820085d82cfb414: Status 404 returned error can't find the container with id 0f2eb97d179ef512f2017723d5dbc85069327e5a64b1a7c3f820085d82cfb414 Nov 25 10:22:44 crc kubenswrapper[4932]: I1125 10:22:44.225731 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6f4846cd84-7wgdx" event={"ID":"0734c01f-574e-4f75-9c2f-77f57b292d28","Type":"ContainerStarted","Data":"55aa1ac750f5c0c876d444794b003989c93eca6792cc77108096fab9fdf28e8f"} Nov 25 10:22:44 crc kubenswrapper[4932]: I1125 10:22:44.226124 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:44 crc kubenswrapper[4932]: I1125 10:22:44.226146 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6f4846cd84-7wgdx" event={"ID":"0734c01f-574e-4f75-9c2f-77f57b292d28","Type":"ContainerStarted","Data":"9c892c906c90e231a240d069eb9f9a0c4a9928d0014bd081ece762df909f89ce"} Nov 25 10:22:44 crc kubenswrapper[4932]: I1125 10:22:44.226164 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:44 crc kubenswrapper[4932]: I1125 10:22:44.226175 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6f4846cd84-7wgdx" event={"ID":"0734c01f-574e-4f75-9c2f-77f57b292d28","Type":"ContainerStarted","Data":"0f2eb97d179ef512f2017723d5dbc85069327e5a64b1a7c3f820085d82cfb414"} Nov 25 10:22:44 crc 
kubenswrapper[4932]: I1125 10:22:44.254340 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6f4846cd84-7wgdx" podStartSLOduration=2.254321619 podStartE2EDuration="2.254321619s" podCreationTimestamp="2025-11-25 10:22:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:22:44.251643862 +0000 UTC m=+5624.377673435" watchObservedRunningTime="2025-11-25 10:22:44.254321619 +0000 UTC m=+5624.380351182" Nov 25 10:22:45 crc kubenswrapper[4932]: I1125 10:22:45.233718 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:46 crc kubenswrapper[4932]: I1125 10:22:46.156755 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:46 crc kubenswrapper[4932]: I1125 10:22:46.157632 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:46 crc kubenswrapper[4932]: I1125 10:22:46.245380 4932 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:22:47 crc kubenswrapper[4932]: I1125 10:22:47.855590 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:49 crc kubenswrapper[4932]: I1125 10:22:49.300361 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:49 crc kubenswrapper[4932]: I1125 10:22:49.819407 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:50 crc kubenswrapper[4932]: I1125 10:22:50.887382 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:22:50 crc kubenswrapper[4932]: I1125 10:22:50.944728 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78f594b499-vhzbh"] Nov 25 10:22:50 crc kubenswrapper[4932]: I1125 10:22:50.944991 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" podUID="aeceb7b7-798f-4dab-bc6d-d7cafbd68898" containerName="dnsmasq-dns" containerID="cri-o://b47cf42e69db299493fe73a0a4ea483afa18054b9a372a81ab59861aa978bb0f" gracePeriod=10 Nov 25 10:22:51 crc kubenswrapper[4932]: I1125 10:22:51.290694 4932 generic.go:334] "Generic (PLEG): container finished" podID="aeceb7b7-798f-4dab-bc6d-d7cafbd68898" containerID="b47cf42e69db299493fe73a0a4ea483afa18054b9a372a81ab59861aa978bb0f" exitCode=0 Nov 25 10:22:51 crc kubenswrapper[4932]: I1125 10:22:51.290747 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" event={"ID":"aeceb7b7-798f-4dab-bc6d-d7cafbd68898","Type":"ContainerDied","Data":"b47cf42e69db299493fe73a0a4ea483afa18054b9a372a81ab59861aa978bb0f"} Nov 25 10:22:51 crc kubenswrapper[4932]: I1125 10:22:51.313136 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6f4846cd84-7wgdx" Nov 25 10:22:51 crc kubenswrapper[4932]: I1125 10:22:51.372690 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-9f97bdc5d-sxm8c"] Nov 25 10:22:51 crc kubenswrapper[4932]: I1125 10:22:51.373257 4932 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/barbican-api-9f97bdc5d-sxm8c" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api-log" containerID="cri-o://d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911" gracePeriod=30 Nov 25 10:22:51 crc kubenswrapper[4932]: I1125 10:22:51.373289 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-9f97bdc5d-sxm8c" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api" containerID="cri-o://f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3" gracePeriod=30 Nov 25 10:22:51 crc kubenswrapper[4932]: I1125 10:22:51.387746 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-9f97bdc5d-sxm8c" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.1.41:9311/healthcheck\": EOF" Nov 25 10:22:51 crc kubenswrapper[4932]: I1125 10:22:51.387968 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-9f97bdc5d-sxm8c" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.1.41:9311/healthcheck\": EOF" Nov 25 10:22:51 crc kubenswrapper[4932]: I1125 10:22:51.388159 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-9f97bdc5d-sxm8c" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.1.41:9311/healthcheck\": EOF" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.024830 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.113579 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-config\") pod \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.113626 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9l5nq\" (UniqueName: \"kubernetes.io/projected/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-kube-api-access-9l5nq\") pod \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.113662 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-dns-svc\") pod \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.113708 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-ovsdbserver-nb\") pod \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.113771 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-ovsdbserver-sb\") pod \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\" (UID: \"aeceb7b7-798f-4dab-bc6d-d7cafbd68898\") " Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.119070 
4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-kube-api-access-9l5nq" (OuterVolumeSpecName: "kube-api-access-9l5nq") pod "aeceb7b7-798f-4dab-bc6d-d7cafbd68898" (UID: "aeceb7b7-798f-4dab-bc6d-d7cafbd68898"). InnerVolumeSpecName "kube-api-access-9l5nq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.178550 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "aeceb7b7-798f-4dab-bc6d-d7cafbd68898" (UID: "aeceb7b7-798f-4dab-bc6d-d7cafbd68898"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.183071 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "aeceb7b7-798f-4dab-bc6d-d7cafbd68898" (UID: "aeceb7b7-798f-4dab-bc6d-d7cafbd68898"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.185530 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "aeceb7b7-798f-4dab-bc6d-d7cafbd68898" (UID: "aeceb7b7-798f-4dab-bc6d-d7cafbd68898"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.189768 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-config" (OuterVolumeSpecName: "config") pod "aeceb7b7-798f-4dab-bc6d-d7cafbd68898" (UID: "aeceb7b7-798f-4dab-bc6d-d7cafbd68898"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.216652 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.216927 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.216938 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.216946 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.216956 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9l5nq\" (UniqueName: \"kubernetes.io/projected/aeceb7b7-798f-4dab-bc6d-d7cafbd68898-kube-api-access-9l5nq\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.300421 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" event={"ID":"aeceb7b7-798f-4dab-bc6d-d7cafbd68898","Type":"ContainerDied","Data":"59bcb4df80169430bf1cdb5bf7c27612dcb19478a2efac3b53ccac568cce69fa"} Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.300445 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78f594b499-vhzbh" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.300512 4932 scope.go:117] "RemoveContainer" containerID="b47cf42e69db299493fe73a0a4ea483afa18054b9a372a81ab59861aa978bb0f" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.302691 4932 generic.go:334] "Generic (PLEG): container finished" podID="556da26d-40db-42ce-85fe-96501eee7604" containerID="d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911" exitCode=143 Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.302723 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9f97bdc5d-sxm8c" event={"ID":"556da26d-40db-42ce-85fe-96501eee7604","Type":"ContainerDied","Data":"d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911"} Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.326301 4932 scope.go:117] "RemoveContainer" containerID="8e590a0c40d46bf38414a850b854c7922e6abd8640d3c07733f1eb4001552fac" Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.329337 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78f594b499-vhzbh"] Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.339487 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78f594b499-vhzbh"] Nov 25 10:22:52 crc kubenswrapper[4932]: I1125 10:22:52.617339 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aeceb7b7-798f-4dab-bc6d-d7cafbd68898" path="/var/lib/kubelet/pods/aeceb7b7-798f-4dab-bc6d-d7cafbd68898/volumes" Nov 25 10:22:56 crc kubenswrapper[4932]: I1125 10:22:56.784925 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-9f97bdc5d-sxm8c" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.1.41:9311/healthcheck\": read tcp 10.217.0.2:53112->10.217.1.41:9311: read: connection reset by peer" Nov 25 10:22:56 crc kubenswrapper[4932]: I1125 10:22:56.786342 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-9f97bdc5d-sxm8c" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.1.41:9311/healthcheck\": read tcp 10.217.0.2:53104->10.217.1.41:9311: read: connection reset by peer" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.243972 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.330546 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-config-data-custom\") pod \"556da26d-40db-42ce-85fe-96501eee7604\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.330591 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-combined-ca-bundle\") pod \"556da26d-40db-42ce-85fe-96501eee7604\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.330774 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h9dfc\" (UniqueName: \"kubernetes.io/projected/556da26d-40db-42ce-85fe-96501eee7604-kube-api-access-h9dfc\") pod \"556da26d-40db-42ce-85fe-96501eee7604\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.330816 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-config-data\") pod \"556da26d-40db-42ce-85fe-96501eee7604\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.330858 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/556da26d-40db-42ce-85fe-96501eee7604-logs\") pod \"556da26d-40db-42ce-85fe-96501eee7604\" (UID: \"556da26d-40db-42ce-85fe-96501eee7604\") " Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.331533 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/556da26d-40db-42ce-85fe-96501eee7604-logs" (OuterVolumeSpecName: "logs") pod "556da26d-40db-42ce-85fe-96501eee7604" (UID: "556da26d-40db-42ce-85fe-96501eee7604"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.336044 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/556da26d-40db-42ce-85fe-96501eee7604-kube-api-access-h9dfc" (OuterVolumeSpecName: "kube-api-access-h9dfc") pod "556da26d-40db-42ce-85fe-96501eee7604" (UID: "556da26d-40db-42ce-85fe-96501eee7604"). InnerVolumeSpecName "kube-api-access-h9dfc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.336086 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "556da26d-40db-42ce-85fe-96501eee7604" (UID: "556da26d-40db-42ce-85fe-96501eee7604"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.356771 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "556da26d-40db-42ce-85fe-96501eee7604" (UID: "556da26d-40db-42ce-85fe-96501eee7604"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.378467 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-config-data" (OuterVolumeSpecName: "config-data") pod "556da26d-40db-42ce-85fe-96501eee7604" (UID: "556da26d-40db-42ce-85fe-96501eee7604"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.400058 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-9f97bdc5d-sxm8c" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.400173 4932 generic.go:334] "Generic (PLEG): container finished" podID="556da26d-40db-42ce-85fe-96501eee7604" containerID="f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3" exitCode=0 Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.400239 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9f97bdc5d-sxm8c" event={"ID":"556da26d-40db-42ce-85fe-96501eee7604","Type":"ContainerDied","Data":"f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3"} Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.400276 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-9f97bdc5d-sxm8c" event={"ID":"556da26d-40db-42ce-85fe-96501eee7604","Type":"ContainerDied","Data":"999bdd9c91c786e6fd62dbd5d7de062cf375bc35abb01e4526d03a44d474ae00"} Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.400296 4932 scope.go:117] "RemoveContainer" containerID="f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.433672 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.433704 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.433713 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h9dfc\" (UniqueName: \"kubernetes.io/projected/556da26d-40db-42ce-85fe-96501eee7604-kube-api-access-h9dfc\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.433723 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/556da26d-40db-42ce-85fe-96501eee7604-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.433732 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/556da26d-40db-42ce-85fe-96501eee7604-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.449812 4932 scope.go:117] "RemoveContainer" containerID="d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.450009 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-9f97bdc5d-sxm8c"] Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.456591 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/barbican-api-9f97bdc5d-sxm8c"] Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.475705 4932 scope.go:117] "RemoveContainer" containerID="f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3" Nov 25 10:22:57 crc kubenswrapper[4932]: E1125 10:22:57.476379 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3\": container with ID starting with f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3 not found: ID does not exist" containerID="f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.476411 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3"} err="failed to get container status \"f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3\": rpc error: code = NotFound desc = could not find container \"f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3\": container with ID starting with f69fec7bed65ad18a56b7df6b738dddf875f2f1a9c5d5a3a650827e3ed848df3 not found: ID does not exist" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.476433 4932 scope.go:117] "RemoveContainer" containerID="d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911" Nov 25 10:22:57 crc kubenswrapper[4932]: E1125 10:22:57.476847 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911\": container with ID starting with d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911 not found: ID does not exist" containerID="d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911" Nov 25 10:22:57 crc kubenswrapper[4932]: I1125 10:22:57.476870 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911"} err="failed to get container status \"d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911\": rpc error: code = NotFound desc = could not find container \"d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911\": container with ID starting with d7e1a4cb5d90200507558f52b0509fa08e799ea168f137d540731a9245129911 not found: ID does not exist" Nov 25 10:22:58 crc kubenswrapper[4932]: I1125 10:22:58.616483 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="556da26d-40db-42ce-85fe-96501eee7604" path="/var/lib/kubelet/pods/556da26d-40db-42ce-85fe-96501eee7604/volumes" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.565988 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-2shch"] Nov 25 10:22:59 crc kubenswrapper[4932]: E1125 10:22:59.566371 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api-log" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.566392 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api-log" Nov 25 10:22:59 crc kubenswrapper[4932]: E1125 10:22:59.566411 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aeceb7b7-798f-4dab-bc6d-d7cafbd68898" containerName="dnsmasq-dns" Nov 25 10:22:59 crc 
kubenswrapper[4932]: I1125 10:22:59.566418 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="aeceb7b7-798f-4dab-bc6d-d7cafbd68898" containerName="dnsmasq-dns" Nov 25 10:22:59 crc kubenswrapper[4932]: E1125 10:22:59.566435 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aeceb7b7-798f-4dab-bc6d-d7cafbd68898" containerName="init" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.566440 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="aeceb7b7-798f-4dab-bc6d-d7cafbd68898" containerName="init" Nov 25 10:22:59 crc kubenswrapper[4932]: E1125 10:22:59.566462 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.566468 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.566631 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="aeceb7b7-798f-4dab-bc6d-d7cafbd68898" containerName="dnsmasq-dns" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.566656 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api-log" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.566692 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="556da26d-40db-42ce-85fe-96501eee7604" containerName="barbican-api" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.567397 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2shch" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.575766 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-2shch"] Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.667835 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-1a17-account-create-9qkmp"] Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.669416 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-1a17-account-create-9qkmp" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.672264 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.675542 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e724d116-0845-4e49-b2bd-66b5a62c3ddf-operator-scripts\") pod \"neutron-db-create-2shch\" (UID: \"e724d116-0845-4e49-b2bd-66b5a62c3ddf\") " pod="openstack/neutron-db-create-2shch" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.675649 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7m6x\" (UniqueName: \"kubernetes.io/projected/e724d116-0845-4e49-b2bd-66b5a62c3ddf-kube-api-access-s7m6x\") pod \"neutron-db-create-2shch\" (UID: \"e724d116-0845-4e49-b2bd-66b5a62c3ddf\") " pod="openstack/neutron-db-create-2shch" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.682378 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-1a17-account-create-9qkmp"] Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.777546 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437-operator-scripts\") pod \"neutron-1a17-account-create-9qkmp\" (UID: \"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437\") " pod="openstack/neutron-1a17-account-create-9qkmp" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.777612 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7m6x\" (UniqueName: \"kubernetes.io/projected/e724d116-0845-4e49-b2bd-66b5a62c3ddf-kube-api-access-s7m6x\") pod \"neutron-db-create-2shch\" (UID: \"e724d116-0845-4e49-b2bd-66b5a62c3ddf\") " pod="openstack/neutron-db-create-2shch" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.777718 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e724d116-0845-4e49-b2bd-66b5a62c3ddf-operator-scripts\") pod \"neutron-db-create-2shch\" (UID: \"e724d116-0845-4e49-b2bd-66b5a62c3ddf\") " pod="openstack/neutron-db-create-2shch" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.777743 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhbnw\" (UniqueName: \"kubernetes.io/projected/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437-kube-api-access-zhbnw\") pod \"neutron-1a17-account-create-9qkmp\" (UID: \"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437\") " pod="openstack/neutron-1a17-account-create-9qkmp" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.778940 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e724d116-0845-4e49-b2bd-66b5a62c3ddf-operator-scripts\") pod \"neutron-db-create-2shch\" (UID: \"e724d116-0845-4e49-b2bd-66b5a62c3ddf\") " pod="openstack/neutron-db-create-2shch" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.799128 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s7m6x\" (UniqueName: \"kubernetes.io/projected/e724d116-0845-4e49-b2bd-66b5a62c3ddf-kube-api-access-s7m6x\") pod \"neutron-db-create-2shch\" (UID: 
\"e724d116-0845-4e49-b2bd-66b5a62c3ddf\") " pod="openstack/neutron-db-create-2shch" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.879378 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhbnw\" (UniqueName: \"kubernetes.io/projected/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437-kube-api-access-zhbnw\") pod \"neutron-1a17-account-create-9qkmp\" (UID: \"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437\") " pod="openstack/neutron-1a17-account-create-9qkmp" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.879456 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437-operator-scripts\") pod \"neutron-1a17-account-create-9qkmp\" (UID: \"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437\") " pod="openstack/neutron-1a17-account-create-9qkmp" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.880159 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437-operator-scripts\") pod \"neutron-1a17-account-create-9qkmp\" (UID: \"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437\") " pod="openstack/neutron-1a17-account-create-9qkmp" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.895006 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2shch" Nov 25 10:22:59 crc kubenswrapper[4932]: I1125 10:22:59.895650 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhbnw\" (UniqueName: \"kubernetes.io/projected/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437-kube-api-access-zhbnw\") pod \"neutron-1a17-account-create-9qkmp\" (UID: \"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437\") " pod="openstack/neutron-1a17-account-create-9qkmp" Nov 25 10:23:00 crc kubenswrapper[4932]: I1125 10:23:00.026850 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-1a17-account-create-9qkmp" Nov 25 10:23:00 crc kubenswrapper[4932]: I1125 10:23:00.329120 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-2shch"] Nov 25 10:23:00 crc kubenswrapper[4932]: W1125 10:23:00.335913 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode724d116_0845_4e49_b2bd_66b5a62c3ddf.slice/crio-8d68e2b3cf0920bd16c705885b381c39926155e323ca8e81d33abf267c34b2cf WatchSource:0}: Error finding container 8d68e2b3cf0920bd16c705885b381c39926155e323ca8e81d33abf267c34b2cf: Status 404 returned error can't find the container with id 8d68e2b3cf0920bd16c705885b381c39926155e323ca8e81d33abf267c34b2cf Nov 25 10:23:00 crc kubenswrapper[4932]: I1125 10:23:00.426086 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2shch" event={"ID":"e724d116-0845-4e49-b2bd-66b5a62c3ddf","Type":"ContainerStarted","Data":"8d68e2b3cf0920bd16c705885b381c39926155e323ca8e81d33abf267c34b2cf"} Nov 25 10:23:00 crc kubenswrapper[4932]: I1125 10:23:00.494609 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-1a17-account-create-9qkmp"] Nov 25 10:23:00 crc kubenswrapper[4932]: W1125 10:23:00.502509 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc4bb52af_9e09_4ade_a4f8_6cfe27cc0437.slice/crio-502bbf4560c46c7f8e8028166ac7b23675241f20090c3e4506b9897b1c2995dd WatchSource:0}: Error finding container 502bbf4560c46c7f8e8028166ac7b23675241f20090c3e4506b9897b1c2995dd: Status 404 returned error can't find the container with id 502bbf4560c46c7f8e8028166ac7b23675241f20090c3e4506b9897b1c2995dd Nov 25 10:23:01 crc kubenswrapper[4932]: I1125 10:23:01.435319 4932 generic.go:334] "Generic (PLEG): container finished" podID="e724d116-0845-4e49-b2bd-66b5a62c3ddf" containerID="392cdbd783296c21c94b00955fe1c6cca5fd492a37699af647ab31baa5e264cc" exitCode=0 Nov 25 10:23:01 crc kubenswrapper[4932]: I1125 10:23:01.435374 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2shch" event={"ID":"e724d116-0845-4e49-b2bd-66b5a62c3ddf","Type":"ContainerDied","Data":"392cdbd783296c21c94b00955fe1c6cca5fd492a37699af647ab31baa5e264cc"} Nov 25 10:23:01 crc kubenswrapper[4932]: I1125 10:23:01.438113 4932 generic.go:334] "Generic (PLEG): container finished" podID="c4bb52af-9e09-4ade-a4f8-6cfe27cc0437" containerID="a292be5c7abe174ef90e235e0074793677aad99f5dffc55489836b2a599a2e72" exitCode=0 Nov 25 10:23:01 crc kubenswrapper[4932]: I1125 10:23:01.438234 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-1a17-account-create-9qkmp" event={"ID":"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437","Type":"ContainerDied","Data":"a292be5c7abe174ef90e235e0074793677aad99f5dffc55489836b2a599a2e72"} Nov 25 10:23:01 crc kubenswrapper[4932]: I1125 10:23:01.438313 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-1a17-account-create-9qkmp" event={"ID":"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437","Type":"ContainerStarted","Data":"502bbf4560c46c7f8e8028166ac7b23675241f20090c3e4506b9897b1c2995dd"} Nov 25 10:23:02 crc kubenswrapper[4932]: I1125 10:23:02.829937 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-2shch" Nov 25 10:23:02 crc kubenswrapper[4932]: I1125 10:23:02.841038 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-1a17-account-create-9qkmp" Nov 25 10:23:02 crc kubenswrapper[4932]: I1125 10:23:02.952665 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e724d116-0845-4e49-b2bd-66b5a62c3ddf-operator-scripts\") pod \"e724d116-0845-4e49-b2bd-66b5a62c3ddf\" (UID: \"e724d116-0845-4e49-b2bd-66b5a62c3ddf\") " Nov 25 10:23:02 crc kubenswrapper[4932]: I1125 10:23:02.952709 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhbnw\" (UniqueName: \"kubernetes.io/projected/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437-kube-api-access-zhbnw\") pod \"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437\" (UID: \"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437\") " Nov 25 10:23:02 crc kubenswrapper[4932]: I1125 10:23:02.952830 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s7m6x\" (UniqueName: \"kubernetes.io/projected/e724d116-0845-4e49-b2bd-66b5a62c3ddf-kube-api-access-s7m6x\") pod \"e724d116-0845-4e49-b2bd-66b5a62c3ddf\" (UID: \"e724d116-0845-4e49-b2bd-66b5a62c3ddf\") " Nov 25 10:23:02 crc kubenswrapper[4932]: I1125 10:23:02.952891 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437-operator-scripts\") pod \"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437\" (UID: \"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437\") " Nov 25 10:23:02 crc kubenswrapper[4932]: I1125 10:23:02.954021 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e724d116-0845-4e49-b2bd-66b5a62c3ddf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e724d116-0845-4e49-b2bd-66b5a62c3ddf" (UID: "e724d116-0845-4e49-b2bd-66b5a62c3ddf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:23:02 crc kubenswrapper[4932]: I1125 10:23:02.954586 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c4bb52af-9e09-4ade-a4f8-6cfe27cc0437" (UID: "c4bb52af-9e09-4ade-a4f8-6cfe27cc0437"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:23:02 crc kubenswrapper[4932]: I1125 10:23:02.958855 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e724d116-0845-4e49-b2bd-66b5a62c3ddf-kube-api-access-s7m6x" (OuterVolumeSpecName: "kube-api-access-s7m6x") pod "e724d116-0845-4e49-b2bd-66b5a62c3ddf" (UID: "e724d116-0845-4e49-b2bd-66b5a62c3ddf"). InnerVolumeSpecName "kube-api-access-s7m6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:02 crc kubenswrapper[4932]: I1125 10:23:02.959029 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437-kube-api-access-zhbnw" (OuterVolumeSpecName: "kube-api-access-zhbnw") pod "c4bb52af-9e09-4ade-a4f8-6cfe27cc0437" (UID: "c4bb52af-9e09-4ade-a4f8-6cfe27cc0437"). InnerVolumeSpecName "kube-api-access-zhbnw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:03 crc kubenswrapper[4932]: I1125 10:23:03.055369 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7m6x\" (UniqueName: \"kubernetes.io/projected/e724d116-0845-4e49-b2bd-66b5a62c3ddf-kube-api-access-s7m6x\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:03 crc kubenswrapper[4932]: I1125 10:23:03.055406 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:03 crc kubenswrapper[4932]: I1125 10:23:03.055416 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e724d116-0845-4e49-b2bd-66b5a62c3ddf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:03 crc kubenswrapper[4932]: I1125 10:23:03.055426 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhbnw\" (UniqueName: \"kubernetes.io/projected/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437-kube-api-access-zhbnw\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:03 crc kubenswrapper[4932]: I1125 10:23:03.456152 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-2shch" event={"ID":"e724d116-0845-4e49-b2bd-66b5a62c3ddf","Type":"ContainerDied","Data":"8d68e2b3cf0920bd16c705885b381c39926155e323ca8e81d33abf267c34b2cf"} Nov 25 10:23:03 crc kubenswrapper[4932]: I1125 10:23:03.456268 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8d68e2b3cf0920bd16c705885b381c39926155e323ca8e81d33abf267c34b2cf" Nov 25 10:23:03 crc kubenswrapper[4932]: I1125 10:23:03.456431 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-2shch" Nov 25 10:23:03 crc kubenswrapper[4932]: I1125 10:23:03.458899 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-1a17-account-create-9qkmp" event={"ID":"c4bb52af-9e09-4ade-a4f8-6cfe27cc0437","Type":"ContainerDied","Data":"502bbf4560c46c7f8e8028166ac7b23675241f20090c3e4506b9897b1c2995dd"} Nov 25 10:23:03 crc kubenswrapper[4932]: I1125 10:23:03.458927 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="502bbf4560c46c7f8e8028166ac7b23675241f20090c3e4506b9897b1c2995dd" Nov 25 10:23:03 crc kubenswrapper[4932]: I1125 10:23:03.458984 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-1a17-account-create-9qkmp" Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.896829 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-7chrd"] Nov 25 10:23:04 crc kubenswrapper[4932]: E1125 10:23:04.897291 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e724d116-0845-4e49-b2bd-66b5a62c3ddf" containerName="mariadb-database-create" Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.897306 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e724d116-0845-4e49-b2bd-66b5a62c3ddf" containerName="mariadb-database-create" Nov 25 10:23:04 crc kubenswrapper[4932]: E1125 10:23:04.897336 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4bb52af-9e09-4ade-a4f8-6cfe27cc0437" containerName="mariadb-account-create" Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.897343 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4bb52af-9e09-4ade-a4f8-6cfe27cc0437" containerName="mariadb-account-create" Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.897510 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e724d116-0845-4e49-b2bd-66b5a62c3ddf" containerName="mariadb-database-create" Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.897520 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4bb52af-9e09-4ade-a4f8-6cfe27cc0437" containerName="mariadb-account-create" Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.898119 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.900338 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-h5wsp" Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.900742 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.901701 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.905178 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-7chrd"] Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.987693 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6e9c29f9-2e9f-499c-8902-4f500bc57328-config\") pod \"neutron-db-sync-7chrd\" (UID: \"6e9c29f9-2e9f-499c-8902-4f500bc57328\") " pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.988126 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89kgm\" (UniqueName: \"kubernetes.io/projected/6e9c29f9-2e9f-499c-8902-4f500bc57328-kube-api-access-89kgm\") pod \"neutron-db-sync-7chrd\" (UID: \"6e9c29f9-2e9f-499c-8902-4f500bc57328\") " pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:04 crc kubenswrapper[4932]: I1125 10:23:04.988159 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e9c29f9-2e9f-499c-8902-4f500bc57328-combined-ca-bundle\") pod \"neutron-db-sync-7chrd\" (UID: \"6e9c29f9-2e9f-499c-8902-4f500bc57328\") " pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:05 crc kubenswrapper[4932]: I1125 10:23:05.089624 
4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e9c29f9-2e9f-499c-8902-4f500bc57328-combined-ca-bundle\") pod \"neutron-db-sync-7chrd\" (UID: \"6e9c29f9-2e9f-499c-8902-4f500bc57328\") " pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:05 crc kubenswrapper[4932]: I1125 10:23:05.089794 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6e9c29f9-2e9f-499c-8902-4f500bc57328-config\") pod \"neutron-db-sync-7chrd\" (UID: \"6e9c29f9-2e9f-499c-8902-4f500bc57328\") " pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:05 crc kubenswrapper[4932]: I1125 10:23:05.089862 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89kgm\" (UniqueName: \"kubernetes.io/projected/6e9c29f9-2e9f-499c-8902-4f500bc57328-kube-api-access-89kgm\") pod \"neutron-db-sync-7chrd\" (UID: \"6e9c29f9-2e9f-499c-8902-4f500bc57328\") " pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:05 crc kubenswrapper[4932]: I1125 10:23:05.096506 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6e9c29f9-2e9f-499c-8902-4f500bc57328-config\") pod \"neutron-db-sync-7chrd\" (UID: \"6e9c29f9-2e9f-499c-8902-4f500bc57328\") " pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:05 crc kubenswrapper[4932]: I1125 10:23:05.097580 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e9c29f9-2e9f-499c-8902-4f500bc57328-combined-ca-bundle\") pod \"neutron-db-sync-7chrd\" (UID: \"6e9c29f9-2e9f-499c-8902-4f500bc57328\") " pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:05 crc kubenswrapper[4932]: I1125 10:23:05.106097 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89kgm\" (UniqueName: \"kubernetes.io/projected/6e9c29f9-2e9f-499c-8902-4f500bc57328-kube-api-access-89kgm\") pod \"neutron-db-sync-7chrd\" (UID: \"6e9c29f9-2e9f-499c-8902-4f500bc57328\") " pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:05 crc kubenswrapper[4932]: I1125 10:23:05.217751 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:05 crc kubenswrapper[4932]: I1125 10:23:05.685433 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-7chrd"] Nov 25 10:23:06 crc kubenswrapper[4932]: I1125 10:23:06.502380 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7chrd" event={"ID":"6e9c29f9-2e9f-499c-8902-4f500bc57328","Type":"ContainerStarted","Data":"bf103e93edc2bf9d2e8e1541bf3f59b3bcc9454b1c86186111c93bb179633a49"} Nov 25 10:23:06 crc kubenswrapper[4932]: I1125 10:23:06.502944 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7chrd" event={"ID":"6e9c29f9-2e9f-499c-8902-4f500bc57328","Type":"ContainerStarted","Data":"05888432ff9e10213448da3fbf4c2e547321fb577fde5ae4ce2ccfa27ac0f178"} Nov 25 10:23:06 crc kubenswrapper[4932]: I1125 10:23:06.520467 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-7chrd" podStartSLOduration=2.520442293 podStartE2EDuration="2.520442293s" podCreationTimestamp="2025-11-25 10:23:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:23:06.517263632 +0000 UTC m=+5646.643293195" watchObservedRunningTime="2025-11-25 10:23:06.520442293 +0000 UTC m=+5646.646471856" Nov 25 10:23:07 crc kubenswrapper[4932]: I1125 10:23:07.181392 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:23:07 crc kubenswrapper[4932]: I1125 10:23:07.181978 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:23:10 crc kubenswrapper[4932]: I1125 10:23:10.533464 4932 generic.go:334] "Generic (PLEG): container finished" podID="6e9c29f9-2e9f-499c-8902-4f500bc57328" containerID="bf103e93edc2bf9d2e8e1541bf3f59b3bcc9454b1c86186111c93bb179633a49" exitCode=0 Nov 25 10:23:10 crc kubenswrapper[4932]: I1125 10:23:10.533513 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7chrd" event={"ID":"6e9c29f9-2e9f-499c-8902-4f500bc57328","Type":"ContainerDied","Data":"bf103e93edc2bf9d2e8e1541bf3f59b3bcc9454b1c86186111c93bb179633a49"} Nov 25 10:23:11 crc kubenswrapper[4932]: I1125 10:23:11.864401 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.028602 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89kgm\" (UniqueName: \"kubernetes.io/projected/6e9c29f9-2e9f-499c-8902-4f500bc57328-kube-api-access-89kgm\") pod \"6e9c29f9-2e9f-499c-8902-4f500bc57328\" (UID: \"6e9c29f9-2e9f-499c-8902-4f500bc57328\") " Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.028748 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e9c29f9-2e9f-499c-8902-4f500bc57328-combined-ca-bundle\") pod \"6e9c29f9-2e9f-499c-8902-4f500bc57328\" (UID: \"6e9c29f9-2e9f-499c-8902-4f500bc57328\") " Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.028813 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6e9c29f9-2e9f-499c-8902-4f500bc57328-config\") pod \"6e9c29f9-2e9f-499c-8902-4f500bc57328\" (UID: \"6e9c29f9-2e9f-499c-8902-4f500bc57328\") " Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.034267 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e9c29f9-2e9f-499c-8902-4f500bc57328-kube-api-access-89kgm" (OuterVolumeSpecName: "kube-api-access-89kgm") pod "6e9c29f9-2e9f-499c-8902-4f500bc57328" (UID: "6e9c29f9-2e9f-499c-8902-4f500bc57328"). InnerVolumeSpecName "kube-api-access-89kgm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.053942 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e9c29f9-2e9f-499c-8902-4f500bc57328-config" (OuterVolumeSpecName: "config") pod "6e9c29f9-2e9f-499c-8902-4f500bc57328" (UID: "6e9c29f9-2e9f-499c-8902-4f500bc57328"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.054360 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e9c29f9-2e9f-499c-8902-4f500bc57328-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e9c29f9-2e9f-499c-8902-4f500bc57328" (UID: "6e9c29f9-2e9f-499c-8902-4f500bc57328"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.130619 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89kgm\" (UniqueName: \"kubernetes.io/projected/6e9c29f9-2e9f-499c-8902-4f500bc57328-kube-api-access-89kgm\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.130662 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e9c29f9-2e9f-499c-8902-4f500bc57328-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.130674 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/6e9c29f9-2e9f-499c-8902-4f500bc57328-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.551066 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-7chrd" event={"ID":"6e9c29f9-2e9f-499c-8902-4f500bc57328","Type":"ContainerDied","Data":"05888432ff9e10213448da3fbf4c2e547321fb577fde5ae4ce2ccfa27ac0f178"} Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.551116 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05888432ff9e10213448da3fbf4c2e547321fb577fde5ae4ce2ccfa27ac0f178" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.551168 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-7chrd" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.685077 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-755584b759-5vxwv"] Nov 25 10:23:12 crc kubenswrapper[4932]: E1125 10:23:12.685488 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e9c29f9-2e9f-499c-8902-4f500bc57328" containerName="neutron-db-sync" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.685509 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e9c29f9-2e9f-499c-8902-4f500bc57328" containerName="neutron-db-sync" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.685698 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e9c29f9-2e9f-499c-8902-4f500bc57328" containerName="neutron-db-sync" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.686645 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.696845 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-755584b759-5vxwv"] Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.843031 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-dns-svc\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.843109 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-ovsdbserver-nb\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.843147 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-config\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.843171 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-ovsdbserver-sb\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.843231 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9p42\" (UniqueName: \"kubernetes.io/projected/82349d87-563d-4bb6-92f3-b795bb04b09b-kube-api-access-x9p42\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.940865 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-58ff45bb68-kcptq"] Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.942263 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.945103 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-dns-svc\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.945182 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-ovsdbserver-nb\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.945238 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-config\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.945264 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-ovsdbserver-sb\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.945300 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9p42\" (UniqueName: \"kubernetes.io/projected/82349d87-563d-4bb6-92f3-b795bb04b09b-kube-api-access-x9p42\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.946227 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-dns-svc\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.946472 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.946646 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.946663 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-ovsdbserver-nb\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.947428 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-config\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.947444 4932 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-ovsdbserver-sb\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.949752 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-h5wsp" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.950349 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.961854 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-58ff45bb68-kcptq"] Nov 25 10:23:12 crc kubenswrapper[4932]: I1125 10:23:12.971484 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9p42\" (UniqueName: \"kubernetes.io/projected/82349d87-563d-4bb6-92f3-b795bb04b09b-kube-api-access-x9p42\") pod \"dnsmasq-dns-755584b759-5vxwv\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.007262 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.047365 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-combined-ca-bundle\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.047585 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-ovndb-tls-certs\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.048182 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-httpd-config\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.048340 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vtr29\" (UniqueName: \"kubernetes.io/projected/350fda0b-0477-45ef-bd5e-91a64ef91332-kube-api-access-vtr29\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.048412 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-config\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.151891 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-combined-ca-bundle\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.151945 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-ovndb-tls-certs\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.152052 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-httpd-config\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.152096 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vtr29\" (UniqueName: \"kubernetes.io/projected/350fda0b-0477-45ef-bd5e-91a64ef91332-kube-api-access-vtr29\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.152140 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-config\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.161815 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-combined-ca-bundle\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.162370 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-config\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.174007 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-ovndb-tls-certs\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.174658 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-httpd-config\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.186368 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vtr29\" (UniqueName: \"kubernetes.io/projected/350fda0b-0477-45ef-bd5e-91a64ef91332-kube-api-access-vtr29\") pod \"neutron-58ff45bb68-kcptq\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " 
pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.264637 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.516905 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-755584b759-5vxwv"] Nov 25 10:23:13 crc kubenswrapper[4932]: W1125 10:23:13.523053 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82349d87_563d_4bb6_92f3_b795bb04b09b.slice/crio-db30b8f5af288034ca98f8b642e3335f7eba7240131ed40fab23ef402bc70668 WatchSource:0}: Error finding container db30b8f5af288034ca98f8b642e3335f7eba7240131ed40fab23ef402bc70668: Status 404 returned error can't find the container with id db30b8f5af288034ca98f8b642e3335f7eba7240131ed40fab23ef402bc70668 Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.585475 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-755584b759-5vxwv" event={"ID":"82349d87-563d-4bb6-92f3-b795bb04b09b","Type":"ContainerStarted","Data":"db30b8f5af288034ca98f8b642e3335f7eba7240131ed40fab23ef402bc70668"} Nov 25 10:23:13 crc kubenswrapper[4932]: I1125 10:23:13.849125 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-58ff45bb68-kcptq"] Nov 25 10:23:13 crc kubenswrapper[4932]: W1125 10:23:13.859790 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod350fda0b_0477_45ef_bd5e_91a64ef91332.slice/crio-b964863324d392e74b157cbf6537dd1a9cd457318b319598402210e0c4c924b1 WatchSource:0}: Error finding container b964863324d392e74b157cbf6537dd1a9cd457318b319598402210e0c4c924b1: Status 404 returned error can't find the container with id b964863324d392e74b157cbf6537dd1a9cd457318b319598402210e0c4c924b1 Nov 25 10:23:14 crc kubenswrapper[4932]: I1125 10:23:14.595549 4932 generic.go:334] "Generic (PLEG): container finished" podID="82349d87-563d-4bb6-92f3-b795bb04b09b" containerID="7c3431caad0783948b03ab22cff257c867eb924fd0d110f78d293ab4bcd7c066" exitCode=0 Nov 25 10:23:14 crc kubenswrapper[4932]: I1125 10:23:14.595769 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-755584b759-5vxwv" event={"ID":"82349d87-563d-4bb6-92f3-b795bb04b09b","Type":"ContainerDied","Data":"7c3431caad0783948b03ab22cff257c867eb924fd0d110f78d293ab4bcd7c066"} Nov 25 10:23:14 crc kubenswrapper[4932]: I1125 10:23:14.601385 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58ff45bb68-kcptq" event={"ID":"350fda0b-0477-45ef-bd5e-91a64ef91332","Type":"ContainerStarted","Data":"48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1"} Nov 25 10:23:14 crc kubenswrapper[4932]: I1125 10:23:14.601423 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58ff45bb68-kcptq" event={"ID":"350fda0b-0477-45ef-bd5e-91a64ef91332","Type":"ContainerStarted","Data":"166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1"} Nov 25 10:23:14 crc kubenswrapper[4932]: I1125 10:23:14.601434 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58ff45bb68-kcptq" event={"ID":"350fda0b-0477-45ef-bd5e-91a64ef91332","Type":"ContainerStarted","Data":"b964863324d392e74b157cbf6537dd1a9cd457318b319598402210e0c4c924b1"} Nov 25 10:23:14 crc kubenswrapper[4932]: I1125 10:23:14.602059 4932 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:14 crc kubenswrapper[4932]: I1125 10:23:14.661429 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-58ff45bb68-kcptq" podStartSLOduration=2.661404447 podStartE2EDuration="2.661404447s" podCreationTimestamp="2025-11-25 10:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:23:14.64792453 +0000 UTC m=+5654.773954103" watchObservedRunningTime="2025-11-25 10:23:14.661404447 +0000 UTC m=+5654.787434010" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.318849 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7ffcbf6bcc-nwp98"] Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.321077 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.324624 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.331739 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.380810 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7ffcbf6bcc-nwp98"] Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.398325 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-public-tls-certs\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.398382 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch57p\" (UniqueName: \"kubernetes.io/projected/1e59b590-1272-49c7-a781-fb591ecc36f3-kube-api-access-ch57p\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.398436 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-httpd-config\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.398570 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-config\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.398601 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-combined-ca-bundle\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.398663 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-ovndb-tls-certs\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.398690 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-internal-tls-certs\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.500235 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-config\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.500293 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-combined-ca-bundle\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.500366 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-ovndb-tls-certs\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.500397 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-internal-tls-certs\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.500422 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-public-tls-certs\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.500445 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch57p\" (UniqueName: \"kubernetes.io/projected/1e59b590-1272-49c7-a781-fb591ecc36f3-kube-api-access-ch57p\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.500492 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-httpd-config\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.507379 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" 
(UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-httpd-config\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.507996 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-ovndb-tls-certs\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.509549 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-combined-ca-bundle\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.511146 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-public-tls-certs\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.511616 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-internal-tls-certs\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.526529 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1e59b590-1272-49c7-a781-fb591ecc36f3-config\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.550889 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch57p\" (UniqueName: \"kubernetes.io/projected/1e59b590-1272-49c7-a781-fb591ecc36f3-kube-api-access-ch57p\") pod \"neutron-7ffcbf6bcc-nwp98\" (UID: \"1e59b590-1272-49c7-a781-fb591ecc36f3\") " pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.613406 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-755584b759-5vxwv" event={"ID":"82349d87-563d-4bb6-92f3-b795bb04b09b","Type":"ContainerStarted","Data":"f07819e599adc85bf50c5a1eaaf8141cd7f31a26ea4bc61924d70eedecfe186e"} Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.614497 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.641002 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:15 crc kubenswrapper[4932]: I1125 10:23:15.642330 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-755584b759-5vxwv" podStartSLOduration=3.6423093250000003 podStartE2EDuration="3.642309325s" podCreationTimestamp="2025-11-25 10:23:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:23:15.638480175 +0000 UTC m=+5655.764509748" watchObservedRunningTime="2025-11-25 10:23:15.642309325 +0000 UTC m=+5655.768338888" Nov 25 10:23:16 crc kubenswrapper[4932]: I1125 10:23:16.315982 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7ffcbf6bcc-nwp98"] Nov 25 10:23:16 crc kubenswrapper[4932]: I1125 10:23:16.637372 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ffcbf6bcc-nwp98" event={"ID":"1e59b590-1272-49c7-a781-fb591ecc36f3","Type":"ContainerStarted","Data":"39374a355c30c2d23831fa593833a22579b56835b57d37d8bba785be7d24c074"} Nov 25 10:23:16 crc kubenswrapper[4932]: I1125 10:23:16.637714 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ffcbf6bcc-nwp98" event={"ID":"1e59b590-1272-49c7-a781-fb591ecc36f3","Type":"ContainerStarted","Data":"7e27ac0bee6dcef47aa8da873c0697b236b3ada23edb403a5936f774fbf0015a"} Nov 25 10:23:17 crc kubenswrapper[4932]: I1125 10:23:17.649412 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ffcbf6bcc-nwp98" event={"ID":"1e59b590-1272-49c7-a781-fb591ecc36f3","Type":"ContainerStarted","Data":"59b0f5141569379e6338ce1d0aaa5338b282b36698e8d4c5060ec603cbec91b5"} Nov 25 10:23:17 crc kubenswrapper[4932]: I1125 10:23:17.649738 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:17 crc kubenswrapper[4932]: I1125 10:23:17.665699 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7ffcbf6bcc-nwp98" podStartSLOduration=2.665680815 podStartE2EDuration="2.665680815s" podCreationTimestamp="2025-11-25 10:23:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:23:17.665460989 +0000 UTC m=+5657.791490572" watchObservedRunningTime="2025-11-25 10:23:17.665680815 +0000 UTC m=+5657.791710378" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.009422 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.103307 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d8667c859-265pc"] Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.103927 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-d8667c859-265pc" podUID="cb12d6e6-7da1-4090-9a49-d731c142d257" containerName="dnsmasq-dns" containerID="cri-o://e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775" gracePeriod=10 Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.582837 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.677404 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-dns-svc\") pod \"cb12d6e6-7da1-4090-9a49-d731c142d257\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.677702 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-ovsdbserver-nb\") pod \"cb12d6e6-7da1-4090-9a49-d731c142d257\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.677770 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sgfpm\" (UniqueName: \"kubernetes.io/projected/cb12d6e6-7da1-4090-9a49-d731c142d257-kube-api-access-sgfpm\") pod \"cb12d6e6-7da1-4090-9a49-d731c142d257\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.677862 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-config\") pod \"cb12d6e6-7da1-4090-9a49-d731c142d257\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.677943 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-ovsdbserver-sb\") pod \"cb12d6e6-7da1-4090-9a49-d731c142d257\" (UID: \"cb12d6e6-7da1-4090-9a49-d731c142d257\") " Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.687870 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb12d6e6-7da1-4090-9a49-d731c142d257-kube-api-access-sgfpm" (OuterVolumeSpecName: "kube-api-access-sgfpm") pod "cb12d6e6-7da1-4090-9a49-d731c142d257" (UID: "cb12d6e6-7da1-4090-9a49-d731c142d257"). InnerVolumeSpecName "kube-api-access-sgfpm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.768828 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cb12d6e6-7da1-4090-9a49-d731c142d257" (UID: "cb12d6e6-7da1-4090-9a49-d731c142d257"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.771504 4932 generic.go:334] "Generic (PLEG): container finished" podID="cb12d6e6-7da1-4090-9a49-d731c142d257" containerID="e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775" exitCode=0 Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.771544 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d8667c859-265pc" event={"ID":"cb12d6e6-7da1-4090-9a49-d731c142d257","Type":"ContainerDied","Data":"e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775"} Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.771574 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-d8667c859-265pc" event={"ID":"cb12d6e6-7da1-4090-9a49-d731c142d257","Type":"ContainerDied","Data":"d56388d1adf4f408ee9d2fe6f3c4b8f22f4cd21e737cf06e7fdf0deb28673e78"} Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.771589 4932 scope.go:117] "RemoveContainer" containerID="e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.771774 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-d8667c859-265pc" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.780812 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cb12d6e6-7da1-4090-9a49-d731c142d257" (UID: "cb12d6e6-7da1-4090-9a49-d731c142d257"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.782704 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.782871 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sgfpm\" (UniqueName: \"kubernetes.io/projected/cb12d6e6-7da1-4090-9a49-d731c142d257-kube-api-access-sgfpm\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.782935 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.816615 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-config" (OuterVolumeSpecName: "config") pod "cb12d6e6-7da1-4090-9a49-d731c142d257" (UID: "cb12d6e6-7da1-4090-9a49-d731c142d257"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.816721 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cb12d6e6-7da1-4090-9a49-d731c142d257" (UID: "cb12d6e6-7da1-4090-9a49-d731c142d257"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.822860 4932 scope.go:117] "RemoveContainer" containerID="178a7bdc374b0c9d6505e923a29f696c9e65b7cac3d544a06adb894faccb94b6" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.846095 4932 scope.go:117] "RemoveContainer" containerID="e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775" Nov 25 10:23:23 crc kubenswrapper[4932]: E1125 10:23:23.846626 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775\": container with ID starting with e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775 not found: ID does not exist" containerID="e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.846676 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775"} err="failed to get container status \"e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775\": rpc error: code = NotFound desc = could not find container \"e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775\": container with ID starting with e44b279e5cf1d38b9275e050b64f905204237df923d2b5297f0bc8b75c439775 not found: ID does not exist" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.846706 4932 scope.go:117] "RemoveContainer" containerID="178a7bdc374b0c9d6505e923a29f696c9e65b7cac3d544a06adb894faccb94b6" Nov 25 10:23:23 crc kubenswrapper[4932]: E1125 10:23:23.846957 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"178a7bdc374b0c9d6505e923a29f696c9e65b7cac3d544a06adb894faccb94b6\": container with ID starting with 178a7bdc374b0c9d6505e923a29f696c9e65b7cac3d544a06adb894faccb94b6 not found: ID does not exist" containerID="178a7bdc374b0c9d6505e923a29f696c9e65b7cac3d544a06adb894faccb94b6" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.846979 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"178a7bdc374b0c9d6505e923a29f696c9e65b7cac3d544a06adb894faccb94b6"} err="failed to get container status \"178a7bdc374b0c9d6505e923a29f696c9e65b7cac3d544a06adb894faccb94b6\": rpc error: code = NotFound desc = could not find container \"178a7bdc374b0c9d6505e923a29f696c9e65b7cac3d544a06adb894faccb94b6\": container with ID starting with 178a7bdc374b0c9d6505e923a29f696c9e65b7cac3d544a06adb894faccb94b6 not found: ID does not exist" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.884654 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:23 crc kubenswrapper[4932]: I1125 10:23:23.884690 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb12d6e6-7da1-4090-9a49-d731c142d257-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:24 crc kubenswrapper[4932]: I1125 10:23:24.101763 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-d8667c859-265pc"] Nov 25 10:23:24 crc kubenswrapper[4932]: I1125 10:23:24.109265 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-d8667c859-265pc"] Nov 25 
10:23:24 crc kubenswrapper[4932]: I1125 10:23:24.616871 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb12d6e6-7da1-4090-9a49-d731c142d257" path="/var/lib/kubelet/pods/cb12d6e6-7da1-4090-9a49-d731c142d257/volumes" Nov 25 10:23:37 crc kubenswrapper[4932]: I1125 10:23:37.181608 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:23:37 crc kubenswrapper[4932]: I1125 10:23:37.182048 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:23:43 crc kubenswrapper[4932]: I1125 10:23:43.273873 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:45 crc kubenswrapper[4932]: I1125 10:23:45.660609 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7ffcbf6bcc-nwp98" Nov 25 10:23:45 crc kubenswrapper[4932]: I1125 10:23:45.734138 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-58ff45bb68-kcptq"] Nov 25 10:23:45 crc kubenswrapper[4932]: I1125 10:23:45.734384 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-58ff45bb68-kcptq" podUID="350fda0b-0477-45ef-bd5e-91a64ef91332" containerName="neutron-api" containerID="cri-o://166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1" gracePeriod=30 Nov 25 10:23:45 crc kubenswrapper[4932]: I1125 10:23:45.734833 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-58ff45bb68-kcptq" podUID="350fda0b-0477-45ef-bd5e-91a64ef91332" containerName="neutron-httpd" containerID="cri-o://48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1" gracePeriod=30 Nov 25 10:23:46 crc kubenswrapper[4932]: I1125 10:23:46.997080 4932 generic.go:334] "Generic (PLEG): container finished" podID="350fda0b-0477-45ef-bd5e-91a64ef91332" containerID="48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1" exitCode=0 Nov 25 10:23:46 crc kubenswrapper[4932]: I1125 10:23:46.997647 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58ff45bb68-kcptq" event={"ID":"350fda0b-0477-45ef-bd5e-91a64ef91332","Type":"ContainerDied","Data":"48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1"} Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.668295 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.815478 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vtr29\" (UniqueName: \"kubernetes.io/projected/350fda0b-0477-45ef-bd5e-91a64ef91332-kube-api-access-vtr29\") pod \"350fda0b-0477-45ef-bd5e-91a64ef91332\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.815797 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-config\") pod \"350fda0b-0477-45ef-bd5e-91a64ef91332\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.815874 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-ovndb-tls-certs\") pod \"350fda0b-0477-45ef-bd5e-91a64ef91332\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.815978 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-httpd-config\") pod \"350fda0b-0477-45ef-bd5e-91a64ef91332\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.816088 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-combined-ca-bundle\") pod \"350fda0b-0477-45ef-bd5e-91a64ef91332\" (UID: \"350fda0b-0477-45ef-bd5e-91a64ef91332\") " Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.823907 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "350fda0b-0477-45ef-bd5e-91a64ef91332" (UID: "350fda0b-0477-45ef-bd5e-91a64ef91332"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.825281 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/350fda0b-0477-45ef-bd5e-91a64ef91332-kube-api-access-vtr29" (OuterVolumeSpecName: "kube-api-access-vtr29") pod "350fda0b-0477-45ef-bd5e-91a64ef91332" (UID: "350fda0b-0477-45ef-bd5e-91a64ef91332"). InnerVolumeSpecName "kube-api-access-vtr29". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.870429 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "350fda0b-0477-45ef-bd5e-91a64ef91332" (UID: "350fda0b-0477-45ef-bd5e-91a64ef91332"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.873052 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-config" (OuterVolumeSpecName: "config") pod "350fda0b-0477-45ef-bd5e-91a64ef91332" (UID: "350fda0b-0477-45ef-bd5e-91a64ef91332"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.890039 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "350fda0b-0477-45ef-bd5e-91a64ef91332" (UID: "350fda0b-0477-45ef-bd5e-91a64ef91332"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.918969 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vtr29\" (UniqueName: \"kubernetes.io/projected/350fda0b-0477-45ef-bd5e-91a64ef91332-kube-api-access-vtr29\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.919004 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.919014 4932 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.919022 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:50 crc kubenswrapper[4932]: I1125 10:23:50.919031 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/350fda0b-0477-45ef-bd5e-91a64ef91332-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:23:51 crc kubenswrapper[4932]: I1125 10:23:51.043390 4932 generic.go:334] "Generic (PLEG): container finished" podID="350fda0b-0477-45ef-bd5e-91a64ef91332" containerID="166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1" exitCode=0 Nov 25 10:23:51 crc kubenswrapper[4932]: I1125 10:23:51.043447 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58ff45bb68-kcptq" event={"ID":"350fda0b-0477-45ef-bd5e-91a64ef91332","Type":"ContainerDied","Data":"166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1"} Nov 25 10:23:51 crc kubenswrapper[4932]: I1125 10:23:51.043454 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-58ff45bb68-kcptq" Nov 25 10:23:51 crc kubenswrapper[4932]: I1125 10:23:51.043489 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-58ff45bb68-kcptq" event={"ID":"350fda0b-0477-45ef-bd5e-91a64ef91332","Type":"ContainerDied","Data":"b964863324d392e74b157cbf6537dd1a9cd457318b319598402210e0c4c924b1"} Nov 25 10:23:51 crc kubenswrapper[4932]: I1125 10:23:51.043511 4932 scope.go:117] "RemoveContainer" containerID="48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1" Nov 25 10:23:51 crc kubenswrapper[4932]: I1125 10:23:51.068453 4932 scope.go:117] "RemoveContainer" containerID="166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1" Nov 25 10:23:51 crc kubenswrapper[4932]: I1125 10:23:51.081864 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-58ff45bb68-kcptq"] Nov 25 10:23:51 crc kubenswrapper[4932]: I1125 10:23:51.089036 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-58ff45bb68-kcptq"] Nov 25 10:23:51 crc kubenswrapper[4932]: I1125 10:23:51.099569 4932 scope.go:117] "RemoveContainer" containerID="48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1" Nov 25 10:23:51 crc kubenswrapper[4932]: E1125 10:23:51.099994 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1\": container with ID starting with 48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1 not found: ID does not exist" containerID="48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1" Nov 25 10:23:51 crc kubenswrapper[4932]: I1125 10:23:51.100050 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1"} err="failed to get container status \"48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1\": rpc error: code = NotFound desc = could not find container \"48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1\": container with ID starting with 48423599cd81dad730c4504585b1f6a410da99e20cf1b0596a4a03492b13e7a1 not found: ID does not exist" Nov 25 10:23:51 crc kubenswrapper[4932]: I1125 10:23:51.100083 4932 scope.go:117] "RemoveContainer" containerID="166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1" Nov 25 10:23:51 crc kubenswrapper[4932]: E1125 10:23:51.100503 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1\": container with ID starting with 166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1 not found: ID does not exist" containerID="166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1" Nov 25 10:23:51 crc kubenswrapper[4932]: I1125 10:23:51.100540 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1"} err="failed to get container status \"166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1\": rpc error: code = NotFound desc = could not find container \"166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1\": container with ID starting with 166430f51b92b99f23babbac46e59af0a23f30d8857ebd0bc357fc6fb19d78b1 not found: ID does not exist" Nov 25 10:23:52 crc 
kubenswrapper[4932]: I1125 10:23:52.617579 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="350fda0b-0477-45ef-bd5e-91a64ef91332" path="/var/lib/kubelet/pods/350fda0b-0477-45ef-bd5e-91a64ef91332/volumes" Nov 25 10:24:07 crc kubenswrapper[4932]: I1125 10:24:07.181390 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:24:07 crc kubenswrapper[4932]: I1125 10:24:07.182412 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:24:07 crc kubenswrapper[4932]: I1125 10:24:07.182481 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 10:24:07 crc kubenswrapper[4932]: I1125 10:24:07.183272 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:24:07 crc kubenswrapper[4932]: I1125 10:24:07.183350 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" gracePeriod=600 Nov 25 10:24:07 crc kubenswrapper[4932]: E1125 10:24:07.835524 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:24:08 crc kubenswrapper[4932]: I1125 10:24:08.204086 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" exitCode=0 Nov 25 10:24:08 crc kubenswrapper[4932]: I1125 10:24:08.204124 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2"} Nov 25 10:24:08 crc kubenswrapper[4932]: I1125 10:24:08.204207 4932 scope.go:117] "RemoveContainer" containerID="5afb8b5224b9f6d2fa11c4acc1cecf78fcbf4f0caae59d8d4bbde36efe0769d9" Nov 25 10:24:08 crc kubenswrapper[4932]: I1125 10:24:08.204833 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:24:08 crc kubenswrapper[4932]: E1125 10:24:08.205052 4932 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.410135 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-jkmbg"] Nov 25 10:24:10 crc kubenswrapper[4932]: E1125 10:24:10.410934 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb12d6e6-7da1-4090-9a49-d731c142d257" containerName="dnsmasq-dns" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.410952 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb12d6e6-7da1-4090-9a49-d731c142d257" containerName="dnsmasq-dns" Nov 25 10:24:10 crc kubenswrapper[4932]: E1125 10:24:10.410970 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb12d6e6-7da1-4090-9a49-d731c142d257" containerName="init" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.410978 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb12d6e6-7da1-4090-9a49-d731c142d257" containerName="init" Nov 25 10:24:10 crc kubenswrapper[4932]: E1125 10:24:10.411007 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="350fda0b-0477-45ef-bd5e-91a64ef91332" containerName="neutron-api" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.411016 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="350fda0b-0477-45ef-bd5e-91a64ef91332" containerName="neutron-api" Nov 25 10:24:10 crc kubenswrapper[4932]: E1125 10:24:10.411037 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="350fda0b-0477-45ef-bd5e-91a64ef91332" containerName="neutron-httpd" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.411044 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="350fda0b-0477-45ef-bd5e-91a64ef91332" containerName="neutron-httpd" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.411296 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb12d6e6-7da1-4090-9a49-d731c142d257" containerName="dnsmasq-dns" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.411328 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="350fda0b-0477-45ef-bd5e-91a64ef91332" containerName="neutron-httpd" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.411337 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="350fda0b-0477-45ef-bd5e-91a64ef91332" containerName="neutron-api" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.412180 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.416067 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.416399 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.416571 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.416754 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-56bmt" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.419342 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.501413 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-jkmbg"] Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.518516 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-jkmbg"] Nov 25 10:24:10 crc kubenswrapper[4932]: E1125 10:24:10.519364 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-zkgz2 ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/swift-ring-rebalance-jkmbg" podUID="9b06c0d3-97da-4a3e-b65b-dd902578a175" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.546409 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c46f9d5bf-gx5n6"] Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.548082 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.554154 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c46f9d5bf-gx5n6"] Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.588482 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9b06c0d3-97da-4a3e-b65b-dd902578a175-ring-data-devices\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.588542 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9b06c0d3-97da-4a3e-b65b-dd902578a175-etc-swift\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.588566 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-combined-ca-bundle\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.588602 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-swiftconf\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.588639 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b06c0d3-97da-4a3e-b65b-dd902578a175-scripts\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.588666 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-dispersionconf\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.588732 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkgz2\" (UniqueName: \"kubernetes.io/projected/9b06c0d3-97da-4a3e-b65b-dd902578a175-kube-api-access-zkgz2\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.690412 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-ovsdbserver-sb\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.690465 4932 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-combined-ca-bundle\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.690498 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9b06c0d3-97da-4a3e-b65b-dd902578a175-etc-swift\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.690542 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-swiftconf\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.690587 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b06c0d3-97da-4a3e-b65b-dd902578a175-scripts\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.690616 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-dispersionconf\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.690687 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkgz2\" (UniqueName: \"kubernetes.io/projected/9b06c0d3-97da-4a3e-b65b-dd902578a175-kube-api-access-zkgz2\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.690740 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp8qb\" (UniqueName: \"kubernetes.io/projected/a6d6e550-47c7-40fb-84de-4603f403720d-kube-api-access-kp8qb\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.690808 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-config\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.690845 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-dns-svc\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.690872 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9b06c0d3-97da-4a3e-b65b-dd902578a175-ring-data-devices\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.690897 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-ovsdbserver-nb\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.691653 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9b06c0d3-97da-4a3e-b65b-dd902578a175-ring-data-devices\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.691725 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b06c0d3-97da-4a3e-b65b-dd902578a175-scripts\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.692975 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9b06c0d3-97da-4a3e-b65b-dd902578a175-etc-swift\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.696049 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-dispersionconf\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.698534 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-swiftconf\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.699937 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-combined-ca-bundle\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.719991 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkgz2\" (UniqueName: \"kubernetes.io/projected/9b06c0d3-97da-4a3e-b65b-dd902578a175-kube-api-access-zkgz2\") pod \"swift-ring-rebalance-jkmbg\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.792763 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-config\") pod 
\"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.792820 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-dns-svc\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.792840 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-ovsdbserver-nb\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.792856 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-ovsdbserver-sb\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.792986 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp8qb\" (UniqueName: \"kubernetes.io/projected/a6d6e550-47c7-40fb-84de-4603f403720d-kube-api-access-kp8qb\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.794019 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-config\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.794549 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-dns-svc\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.795093 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-ovsdbserver-nb\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.795763 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-ovsdbserver-sb\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.821225 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp8qb\" (UniqueName: \"kubernetes.io/projected/a6d6e550-47c7-40fb-84de-4603f403720d-kube-api-access-kp8qb\") pod \"dnsmasq-dns-6c46f9d5bf-gx5n6\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " 
pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:10 crc kubenswrapper[4932]: I1125 10:24:10.872364 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.253303 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.266437 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.373306 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c46f9d5bf-gx5n6"] Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.406625 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9b06c0d3-97da-4a3e-b65b-dd902578a175-ring-data-devices\") pod \"9b06c0d3-97da-4a3e-b65b-dd902578a175\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.407019 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-combined-ca-bundle\") pod \"9b06c0d3-97da-4a3e-b65b-dd902578a175\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.407324 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-dispersionconf\") pod \"9b06c0d3-97da-4a3e-b65b-dd902578a175\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.407512 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkgz2\" (UniqueName: \"kubernetes.io/projected/9b06c0d3-97da-4a3e-b65b-dd902578a175-kube-api-access-zkgz2\") pod \"9b06c0d3-97da-4a3e-b65b-dd902578a175\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.407656 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9b06c0d3-97da-4a3e-b65b-dd902578a175-etc-swift\") pod \"9b06c0d3-97da-4a3e-b65b-dd902578a175\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.407768 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-swiftconf\") pod \"9b06c0d3-97da-4a3e-b65b-dd902578a175\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.407876 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b06c0d3-97da-4a3e-b65b-dd902578a175-scripts\") pod \"9b06c0d3-97da-4a3e-b65b-dd902578a175\" (UID: \"9b06c0d3-97da-4a3e-b65b-dd902578a175\") " Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.408898 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b06c0d3-97da-4a3e-b65b-dd902578a175-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "9b06c0d3-97da-4a3e-b65b-dd902578a175" (UID: 
"9b06c0d3-97da-4a3e-b65b-dd902578a175"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.409256 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b06c0d3-97da-4a3e-b65b-dd902578a175-scripts" (OuterVolumeSpecName: "scripts") pod "9b06c0d3-97da-4a3e-b65b-dd902578a175" (UID: "9b06c0d3-97da-4a3e-b65b-dd902578a175"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.409268 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b06c0d3-97da-4a3e-b65b-dd902578a175-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "9b06c0d3-97da-4a3e-b65b-dd902578a175" (UID: "9b06c0d3-97da-4a3e-b65b-dd902578a175"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.411561 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "9b06c0d3-97da-4a3e-b65b-dd902578a175" (UID: "9b06c0d3-97da-4a3e-b65b-dd902578a175"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.412004 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b06c0d3-97da-4a3e-b65b-dd902578a175-kube-api-access-zkgz2" (OuterVolumeSpecName: "kube-api-access-zkgz2") pod "9b06c0d3-97da-4a3e-b65b-dd902578a175" (UID: "9b06c0d3-97da-4a3e-b65b-dd902578a175"). InnerVolumeSpecName "kube-api-access-zkgz2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.412702 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b06c0d3-97da-4a3e-b65b-dd902578a175" (UID: "9b06c0d3-97da-4a3e-b65b-dd902578a175"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.414070 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "9b06c0d3-97da-4a3e-b65b-dd902578a175" (UID: "9b06c0d3-97da-4a3e-b65b-dd902578a175"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.510240 4932 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/9b06c0d3-97da-4a3e-b65b-dd902578a175-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.510269 4932 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.510281 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9b06c0d3-97da-4a3e-b65b-dd902578a175-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.510289 4932 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/9b06c0d3-97da-4a3e-b65b-dd902578a175-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.510299 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.510307 4932 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/9b06c0d3-97da-4a3e-b65b-dd902578a175-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:11 crc kubenswrapper[4932]: I1125 10:24:11.510315 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkgz2\" (UniqueName: \"kubernetes.io/projected/9b06c0d3-97da-4a3e-b65b-dd902578a175-kube-api-access-zkgz2\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.261557 4932 generic.go:334] "Generic (PLEG): container finished" podID="a6d6e550-47c7-40fb-84de-4603f403720d" containerID="945e876787244a7a68b8d992a3cda16055eed25ae9f958fc05d706c1da8b0e6d" exitCode=0 Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.261934 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-jkmbg" Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.261593 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" event={"ID":"a6d6e550-47c7-40fb-84de-4603f403720d","Type":"ContainerDied","Data":"945e876787244a7a68b8d992a3cda16055eed25ae9f958fc05d706c1da8b0e6d"} Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.262061 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" event={"ID":"a6d6e550-47c7-40fb-84de-4603f403720d","Type":"ContainerStarted","Data":"c5c678b7eec5329a2700edd7bb632ead28e5bcca85bf097f687fc1cf9e91776c"} Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.397392 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-jkmbg"] Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.404616 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-jkmbg"] Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.616338 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b06c0d3-97da-4a3e-b65b-dd902578a175" path="/var/lib/kubelet/pods/9b06c0d3-97da-4a3e-b65b-dd902578a175/volumes" Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.936123 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-55b997df9d-b7dnw"] Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.937564 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.939212 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.939306 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.939434 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.939834 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-56bmt" Nov 25 10:24:12 crc kubenswrapper[4932]: I1125 10:24:12.952883 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-55b997df9d-b7dnw"] Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.037902 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cnz8\" (UniqueName: \"kubernetes.io/projected/1ddad29c-71a5-49d6-8088-c5663fc33555-kube-api-access-2cnz8\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.038011 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ddad29c-71a5-49d6-8088-c5663fc33555-combined-ca-bundle\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.038044 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ddad29c-71a5-49d6-8088-c5663fc33555-config-data\") pod 
\"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.038082 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ddad29c-71a5-49d6-8088-c5663fc33555-run-httpd\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.038097 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ddad29c-71a5-49d6-8088-c5663fc33555-etc-swift\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.038139 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ddad29c-71a5-49d6-8088-c5663fc33555-log-httpd\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.140122 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ddad29c-71a5-49d6-8088-c5663fc33555-combined-ca-bundle\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.140932 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ddad29c-71a5-49d6-8088-c5663fc33555-config-data\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.141000 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ddad29c-71a5-49d6-8088-c5663fc33555-run-httpd\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.141029 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ddad29c-71a5-49d6-8088-c5663fc33555-etc-swift\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.141091 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ddad29c-71a5-49d6-8088-c5663fc33555-log-httpd\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.141171 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cnz8\" (UniqueName: \"kubernetes.io/projected/1ddad29c-71a5-49d6-8088-c5663fc33555-kube-api-access-2cnz8\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: 
\"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.141900 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ddad29c-71a5-49d6-8088-c5663fc33555-log-httpd\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.141999 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ddad29c-71a5-49d6-8088-c5663fc33555-run-httpd\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.145718 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ddad29c-71a5-49d6-8088-c5663fc33555-config-data\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.146001 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ddad29c-71a5-49d6-8088-c5663fc33555-etc-swift\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.149860 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ddad29c-71a5-49d6-8088-c5663fc33555-combined-ca-bundle\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.162112 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cnz8\" (UniqueName: \"kubernetes.io/projected/1ddad29c-71a5-49d6-8088-c5663fc33555-kube-api-access-2cnz8\") pod \"swift-proxy-55b997df9d-b7dnw\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.272016 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" event={"ID":"a6d6e550-47c7-40fb-84de-4603f403720d","Type":"ContainerStarted","Data":"fc008c454e853da5bee0263aa2bbf89684bb8e828a3d5ef25066d9a778ff45f0"} Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.272822 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.273247 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.299057 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" podStartSLOduration=3.299034204 podStartE2EDuration="3.299034204s" podCreationTimestamp="2025-11-25 10:24:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:24:13.287628047 +0000 UTC m=+5713.413657620" watchObservedRunningTime="2025-11-25 10:24:13.299034204 +0000 UTC m=+5713.425063767" Nov 25 10:24:13 crc kubenswrapper[4932]: I1125 10:24:13.995299 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-55b997df9d-b7dnw"] Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.369039 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-55b997df9d-b7dnw" event={"ID":"1ddad29c-71a5-49d6-8088-c5663fc33555","Type":"ContainerStarted","Data":"e5efbf11ad47f3a4b3c4b78c5add2ce69bf9a6f26cd18bbb12953c65a517e4ae"} Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.560469 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6458f694-t794m"] Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.562442 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.564481 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.564758 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.576832 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6458f694-t794m"] Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.664643 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-internal-tls-certs\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.665291 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-run-httpd\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.665407 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-config-data\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.665563 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4j2xr\" (UniqueName: \"kubernetes.io/projected/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-kube-api-access-4j2xr\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " 
pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.665732 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-public-tls-certs\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.665901 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-log-httpd\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.666024 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-combined-ca-bundle\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.666137 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-etc-swift\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.770436 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-log-httpd\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.770490 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-combined-ca-bundle\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.770511 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-etc-swift\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.770558 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-internal-tls-certs\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.770591 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-run-httpd\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 
25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.770606 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-config-data\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.770634 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4j2xr\" (UniqueName: \"kubernetes.io/projected/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-kube-api-access-4j2xr\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.770683 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-public-tls-certs\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.774819 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-run-httpd\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.774937 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-log-httpd\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.778686 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-internal-tls-certs\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.779353 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-etc-swift\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.780988 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-combined-ca-bundle\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.785950 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-public-tls-certs\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.797596 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-config-data\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.804154 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4j2xr\" (UniqueName: \"kubernetes.io/projected/d90bc80e-42f1-4259-b0b5-e4d8632a3dd7-kube-api-access-4j2xr\") pod \"swift-proxy-6458f694-t794m\" (UID: \"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7\") " pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:14 crc kubenswrapper[4932]: I1125 10:24:14.879674 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.389241 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-55b997df9d-b7dnw" event={"ID":"1ddad29c-71a5-49d6-8088-c5663fc33555","Type":"ContainerStarted","Data":"8efdbdd8706dea5dadf8befad13cade594bec8bf066f06f46da9654e61e46cc3"} Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.389764 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.389823 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.389836 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-55b997df9d-b7dnw" event={"ID":"1ddad29c-71a5-49d6-8088-c5663fc33555","Type":"ContainerStarted","Data":"c848052ae0a45f3660e64a283df9ade815fc030f88eb47dab9786da6a49b2eea"} Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.436074 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-55b997df9d-b7dnw" podStartSLOduration=3.436055071 podStartE2EDuration="3.436055071s" podCreationTimestamp="2025-11-25 10:24:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:24:15.420454994 +0000 UTC m=+5715.546484557" watchObservedRunningTime="2025-11-25 10:24:15.436055071 +0000 UTC m=+5715.562084634" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.575675 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6458f694-t794m"] Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.666710 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-v482s"] Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.668767 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.677322 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v482s"] Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.709419 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62faf475-5bd6-44d5-91fb-1a423ba35af2-catalog-content\") pod \"certified-operators-v482s\" (UID: \"62faf475-5bd6-44d5-91fb-1a423ba35af2\") " pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.709587 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmntc\" (UniqueName: \"kubernetes.io/projected/62faf475-5bd6-44d5-91fb-1a423ba35af2-kube-api-access-mmntc\") pod \"certified-operators-v482s\" (UID: \"62faf475-5bd6-44d5-91fb-1a423ba35af2\") " pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.709731 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62faf475-5bd6-44d5-91fb-1a423ba35af2-utilities\") pod \"certified-operators-v482s\" (UID: \"62faf475-5bd6-44d5-91fb-1a423ba35af2\") " pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.811782 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmntc\" (UniqueName: \"kubernetes.io/projected/62faf475-5bd6-44d5-91fb-1a423ba35af2-kube-api-access-mmntc\") pod \"certified-operators-v482s\" (UID: \"62faf475-5bd6-44d5-91fb-1a423ba35af2\") " pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.811888 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62faf475-5bd6-44d5-91fb-1a423ba35af2-utilities\") pod \"certified-operators-v482s\" (UID: \"62faf475-5bd6-44d5-91fb-1a423ba35af2\") " pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.811972 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62faf475-5bd6-44d5-91fb-1a423ba35af2-catalog-content\") pod \"certified-operators-v482s\" (UID: \"62faf475-5bd6-44d5-91fb-1a423ba35af2\") " pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.812777 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62faf475-5bd6-44d5-91fb-1a423ba35af2-catalog-content\") pod \"certified-operators-v482s\" (UID: \"62faf475-5bd6-44d5-91fb-1a423ba35af2\") " pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.812892 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62faf475-5bd6-44d5-91fb-1a423ba35af2-utilities\") pod \"certified-operators-v482s\" (UID: \"62faf475-5bd6-44d5-91fb-1a423ba35af2\") " pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:15 crc kubenswrapper[4932]: I1125 10:24:15.836603 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mmntc\" (UniqueName: \"kubernetes.io/projected/62faf475-5bd6-44d5-91fb-1a423ba35af2-kube-api-access-mmntc\") pod \"certified-operators-v482s\" (UID: \"62faf475-5bd6-44d5-91fb-1a423ba35af2\") " pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:16 crc kubenswrapper[4932]: I1125 10:24:16.072640 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:16 crc kubenswrapper[4932]: I1125 10:24:16.455034 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6458f694-t794m" event={"ID":"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7","Type":"ContainerStarted","Data":"727ce453813ad0799abadc7b798d266ccc22f5724a49b26a30462b78ffc353d4"} Nov 25 10:24:16 crc kubenswrapper[4932]: I1125 10:24:16.455385 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6458f694-t794m" event={"ID":"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7","Type":"ContainerStarted","Data":"05d970971810baa7312e51975d983da271b46cb3e253b2ac6d4acb42a084161d"} Nov 25 10:24:16 crc kubenswrapper[4932]: I1125 10:24:16.743061 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-v482s"] Nov 25 10:24:17 crc kubenswrapper[4932]: I1125 10:24:17.474879 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6458f694-t794m" event={"ID":"d90bc80e-42f1-4259-b0b5-e4d8632a3dd7","Type":"ContainerStarted","Data":"48e2f7da0c670c0bfdeb1928db316ca883a7e3da0b1a9cae31566153f9744939"} Nov 25 10:24:17 crc kubenswrapper[4932]: I1125 10:24:17.475819 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:17 crc kubenswrapper[4932]: I1125 10:24:17.476001 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:17 crc kubenswrapper[4932]: I1125 10:24:17.476716 4932 generic.go:334] "Generic (PLEG): container finished" podID="62faf475-5bd6-44d5-91fb-1a423ba35af2" containerID="408d67258803cd364c76a663662d89cf4169ddd79d403181a2a7b66a2fb83588" exitCode=0 Nov 25 10:24:17 crc kubenswrapper[4932]: I1125 10:24:17.476757 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v482s" event={"ID":"62faf475-5bd6-44d5-91fb-1a423ba35af2","Type":"ContainerDied","Data":"408d67258803cd364c76a663662d89cf4169ddd79d403181a2a7b66a2fb83588"} Nov 25 10:24:17 crc kubenswrapper[4932]: I1125 10:24:17.476781 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v482s" event={"ID":"62faf475-5bd6-44d5-91fb-1a423ba35af2","Type":"ContainerStarted","Data":"7e9b74d22af8be635d7257ff0043b095b06e9dd53ef2d379607f682bb3a625f7"} Nov 25 10:24:17 crc kubenswrapper[4932]: I1125 10:24:17.495515 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6458f694-t794m" podStartSLOduration=3.495495676 podStartE2EDuration="3.495495676s" podCreationTimestamp="2025-11-25 10:24:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:24:17.494328072 +0000 UTC m=+5717.620357635" watchObservedRunningTime="2025-11-25 10:24:17.495495676 +0000 UTC m=+5717.621525239" Nov 25 10:24:19 crc kubenswrapper[4932]: I1125 10:24:19.501954 4932 generic.go:334] "Generic (PLEG): container finished" 
podID="62faf475-5bd6-44d5-91fb-1a423ba35af2" containerID="08ad62c4b1fcf29adfcd8a048e22362a2f7ae064843da922c374737be8fa6b80" exitCode=0 Nov 25 10:24:19 crc kubenswrapper[4932]: I1125 10:24:19.504729 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v482s" event={"ID":"62faf475-5bd6-44d5-91fb-1a423ba35af2","Type":"ContainerDied","Data":"08ad62c4b1fcf29adfcd8a048e22362a2f7ae064843da922c374737be8fa6b80"} Nov 25 10:24:20 crc kubenswrapper[4932]: I1125 10:24:20.613715 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:24:20 crc kubenswrapper[4932]: E1125 10:24:20.614383 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:24:20 crc kubenswrapper[4932]: I1125 10:24:20.874232 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:20 crc kubenswrapper[4932]: I1125 10:24:20.939708 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-755584b759-5vxwv"] Nov 25 10:24:20 crc kubenswrapper[4932]: I1125 10:24:20.940215 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-755584b759-5vxwv" podUID="82349d87-563d-4bb6-92f3-b795bb04b09b" containerName="dnsmasq-dns" containerID="cri-o://f07819e599adc85bf50c5a1eaaf8141cd7f31a26ea4bc61924d70eedecfe186e" gracePeriod=10 Nov 25 10:24:21 crc kubenswrapper[4932]: I1125 10:24:21.542085 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v482s" event={"ID":"62faf475-5bd6-44d5-91fb-1a423ba35af2","Type":"ContainerStarted","Data":"90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67"} Nov 25 10:24:21 crc kubenswrapper[4932]: I1125 10:24:21.548166 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-755584b759-5vxwv" event={"ID":"82349d87-563d-4bb6-92f3-b795bb04b09b","Type":"ContainerDied","Data":"f07819e599adc85bf50c5a1eaaf8141cd7f31a26ea4bc61924d70eedecfe186e"} Nov 25 10:24:21 crc kubenswrapper[4932]: I1125 10:24:21.548050 4932 generic.go:334] "Generic (PLEG): container finished" podID="82349d87-563d-4bb6-92f3-b795bb04b09b" containerID="f07819e599adc85bf50c5a1eaaf8141cd7f31a26ea4bc61924d70eedecfe186e" exitCode=0 Nov 25 10:24:21 crc kubenswrapper[4932]: I1125 10:24:21.574798 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-v482s" podStartSLOduration=3.45674073 podStartE2EDuration="6.574771609s" podCreationTimestamp="2025-11-25 10:24:15 +0000 UTC" firstStartedPulling="2025-11-25 10:24:17.478516619 +0000 UTC m=+5717.604546182" lastFinishedPulling="2025-11-25 10:24:20.596547498 +0000 UTC m=+5720.722577061" observedRunningTime="2025-11-25 10:24:21.560557101 +0000 UTC m=+5721.686586684" watchObservedRunningTime="2025-11-25 10:24:21.574771609 +0000 UTC m=+5721.700801172" Nov 25 10:24:21 crc kubenswrapper[4932]: I1125 10:24:21.940255 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:24:21 crc kubenswrapper[4932]: I1125 10:24:21.972840 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-ovsdbserver-nb\") pod \"82349d87-563d-4bb6-92f3-b795bb04b09b\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " Nov 25 10:24:21 crc kubenswrapper[4932]: I1125 10:24:21.973156 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-ovsdbserver-sb\") pod \"82349d87-563d-4bb6-92f3-b795bb04b09b\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " Nov 25 10:24:21 crc kubenswrapper[4932]: I1125 10:24:21.973327 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9p42\" (UniqueName: \"kubernetes.io/projected/82349d87-563d-4bb6-92f3-b795bb04b09b-kube-api-access-x9p42\") pod \"82349d87-563d-4bb6-92f3-b795bb04b09b\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " Nov 25 10:24:21 crc kubenswrapper[4932]: I1125 10:24:21.973473 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-dns-svc\") pod \"82349d87-563d-4bb6-92f3-b795bb04b09b\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " Nov 25 10:24:21 crc kubenswrapper[4932]: I1125 10:24:21.973562 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-config\") pod \"82349d87-563d-4bb6-92f3-b795bb04b09b\" (UID: \"82349d87-563d-4bb6-92f3-b795bb04b09b\") " Nov 25 10:24:21 crc kubenswrapper[4932]: I1125 10:24:21.984454 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82349d87-563d-4bb6-92f3-b795bb04b09b-kube-api-access-x9p42" (OuterVolumeSpecName: "kube-api-access-x9p42") pod "82349d87-563d-4bb6-92f3-b795bb04b09b" (UID: "82349d87-563d-4bb6-92f3-b795bb04b09b"). InnerVolumeSpecName "kube-api-access-x9p42". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.026577 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "82349d87-563d-4bb6-92f3-b795bb04b09b" (UID: "82349d87-563d-4bb6-92f3-b795bb04b09b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.034573 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-config" (OuterVolumeSpecName: "config") pod "82349d87-563d-4bb6-92f3-b795bb04b09b" (UID: "82349d87-563d-4bb6-92f3-b795bb04b09b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.040729 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "82349d87-563d-4bb6-92f3-b795bb04b09b" (UID: "82349d87-563d-4bb6-92f3-b795bb04b09b"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.056101 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "82349d87-563d-4bb6-92f3-b795bb04b09b" (UID: "82349d87-563d-4bb6-92f3-b795bb04b09b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.076033 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9p42\" (UniqueName: \"kubernetes.io/projected/82349d87-563d-4bb6-92f3-b795bb04b09b-kube-api-access-x9p42\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.076217 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.076318 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.076397 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.076464 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/82349d87-563d-4bb6-92f3-b795bb04b09b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.558160 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-755584b759-5vxwv" event={"ID":"82349d87-563d-4bb6-92f3-b795bb04b09b","Type":"ContainerDied","Data":"db30b8f5af288034ca98f8b642e3335f7eba7240131ed40fab23ef402bc70668"} Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.558534 4932 scope.go:117] "RemoveContainer" containerID="f07819e599adc85bf50c5a1eaaf8141cd7f31a26ea4bc61924d70eedecfe186e" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.558171 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-755584b759-5vxwv" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.588105 4932 scope.go:117] "RemoveContainer" containerID="7c3431caad0783948b03ab22cff257c867eb924fd0d110f78d293ab4bcd7c066" Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.591372 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-755584b759-5vxwv"] Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.598012 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-755584b759-5vxwv"] Nov 25 10:24:22 crc kubenswrapper[4932]: I1125 10:24:22.618298 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82349d87-563d-4bb6-92f3-b795bb04b09b" path="/var/lib/kubelet/pods/82349d87-563d-4bb6-92f3-b795bb04b09b/volumes" Nov 25 10:24:23 crc kubenswrapper[4932]: I1125 10:24:23.275946 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:23 crc kubenswrapper[4932]: I1125 10:24:23.277136 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:24 crc kubenswrapper[4932]: I1125 10:24:24.884837 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:24 crc kubenswrapper[4932]: I1125 10:24:24.885393 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6458f694-t794m" Nov 25 10:24:24 crc kubenswrapper[4932]: I1125 10:24:24.961895 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-55b997df9d-b7dnw"] Nov 25 10:24:24 crc kubenswrapper[4932]: I1125 10:24:24.962153 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-55b997df9d-b7dnw" podUID="1ddad29c-71a5-49d6-8088-c5663fc33555" containerName="proxy-httpd" containerID="cri-o://c848052ae0a45f3660e64a283df9ade815fc030f88eb47dab9786da6a49b2eea" gracePeriod=30 Nov 25 10:24:24 crc kubenswrapper[4932]: I1125 10:24:24.962336 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-55b997df9d-b7dnw" podUID="1ddad29c-71a5-49d6-8088-c5663fc33555" containerName="proxy-server" containerID="cri-o://8efdbdd8706dea5dadf8befad13cade594bec8bf066f06f46da9654e61e46cc3" gracePeriod=30 Nov 25 10:24:25 crc kubenswrapper[4932]: I1125 10:24:25.585096 4932 generic.go:334] "Generic (PLEG): container finished" podID="1ddad29c-71a5-49d6-8088-c5663fc33555" containerID="c848052ae0a45f3660e64a283df9ade815fc030f88eb47dab9786da6a49b2eea" exitCode=0 Nov 25 10:24:25 crc kubenswrapper[4932]: I1125 10:24:25.585952 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-55b997df9d-b7dnw" event={"ID":"1ddad29c-71a5-49d6-8088-c5663fc33555","Type":"ContainerDied","Data":"c848052ae0a45f3660e64a283df9ade815fc030f88eb47dab9786da6a49b2eea"} Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:26.073421 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:26.074439 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:26.136753 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:26.594135 4932 generic.go:334] "Generic (PLEG): container finished" podID="1ddad29c-71a5-49d6-8088-c5663fc33555" containerID="8efdbdd8706dea5dadf8befad13cade594bec8bf066f06f46da9654e61e46cc3" exitCode=0 Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:26.594236 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-55b997df9d-b7dnw" event={"ID":"1ddad29c-71a5-49d6-8088-c5663fc33555","Type":"ContainerDied","Data":"8efdbdd8706dea5dadf8befad13cade594bec8bf066f06f46da9654e61e46cc3"} Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:26.637306 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:26.688540 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-v482s"] Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.689936 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.797628 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ddad29c-71a5-49d6-8088-c5663fc33555-combined-ca-bundle\") pod \"1ddad29c-71a5-49d6-8088-c5663fc33555\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.797742 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ddad29c-71a5-49d6-8088-c5663fc33555-config-data\") pod \"1ddad29c-71a5-49d6-8088-c5663fc33555\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.797805 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ddad29c-71a5-49d6-8088-c5663fc33555-log-httpd\") pod \"1ddad29c-71a5-49d6-8088-c5663fc33555\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.797907 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ddad29c-71a5-49d6-8088-c5663fc33555-run-httpd\") pod \"1ddad29c-71a5-49d6-8088-c5663fc33555\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.797948 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cnz8\" (UniqueName: \"kubernetes.io/projected/1ddad29c-71a5-49d6-8088-c5663fc33555-kube-api-access-2cnz8\") pod \"1ddad29c-71a5-49d6-8088-c5663fc33555\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.797977 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ddad29c-71a5-49d6-8088-c5663fc33555-etc-swift\") pod \"1ddad29c-71a5-49d6-8088-c5663fc33555\" (UID: \"1ddad29c-71a5-49d6-8088-c5663fc33555\") " Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.800447 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ddad29c-71a5-49d6-8088-c5663fc33555-log-httpd" (OuterVolumeSpecName: "log-httpd") pod 
"1ddad29c-71a5-49d6-8088-c5663fc33555" (UID: "1ddad29c-71a5-49d6-8088-c5663fc33555"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.807135 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ddad29c-71a5-49d6-8088-c5663fc33555-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1ddad29c-71a5-49d6-8088-c5663fc33555" (UID: "1ddad29c-71a5-49d6-8088-c5663fc33555"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.813374 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ddad29c-71a5-49d6-8088-c5663fc33555-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "1ddad29c-71a5-49d6-8088-c5663fc33555" (UID: "1ddad29c-71a5-49d6-8088-c5663fc33555"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.813456 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ddad29c-71a5-49d6-8088-c5663fc33555-kube-api-access-2cnz8" (OuterVolumeSpecName: "kube-api-access-2cnz8") pod "1ddad29c-71a5-49d6-8088-c5663fc33555" (UID: "1ddad29c-71a5-49d6-8088-c5663fc33555"). InnerVolumeSpecName "kube-api-access-2cnz8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.880384 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ddad29c-71a5-49d6-8088-c5663fc33555-config-data" (OuterVolumeSpecName: "config-data") pod "1ddad29c-71a5-49d6-8088-c5663fc33555" (UID: "1ddad29c-71a5-49d6-8088-c5663fc33555"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.887488 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ddad29c-71a5-49d6-8088-c5663fc33555-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1ddad29c-71a5-49d6-8088-c5663fc33555" (UID: "1ddad29c-71a5-49d6-8088-c5663fc33555"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.901360 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ddad29c-71a5-49d6-8088-c5663fc33555-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.901412 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cnz8\" (UniqueName: \"kubernetes.io/projected/1ddad29c-71a5-49d6-8088-c5663fc33555-kube-api-access-2cnz8\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.901421 4932 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1ddad29c-71a5-49d6-8088-c5663fc33555-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.901430 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ddad29c-71a5-49d6-8088-c5663fc33555-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.901438 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ddad29c-71a5-49d6-8088-c5663fc33555-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:27 crc kubenswrapper[4932]: I1125 10:24:27.901447 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1ddad29c-71a5-49d6-8088-c5663fc33555-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:28 crc kubenswrapper[4932]: I1125 10:24:28.614680 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-55b997df9d-b7dnw" Nov 25 10:24:28 crc kubenswrapper[4932]: I1125 10:24:28.614848 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-v482s" podUID="62faf475-5bd6-44d5-91fb-1a423ba35af2" containerName="registry-server" containerID="cri-o://90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67" gracePeriod=2 Nov 25 10:24:28 crc kubenswrapper[4932]: I1125 10:24:28.619789 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-55b997df9d-b7dnw" event={"ID":"1ddad29c-71a5-49d6-8088-c5663fc33555","Type":"ContainerDied","Data":"e5efbf11ad47f3a4b3c4b78c5add2ce69bf9a6f26cd18bbb12953c65a517e4ae"} Nov 25 10:24:28 crc kubenswrapper[4932]: I1125 10:24:28.619886 4932 scope.go:117] "RemoveContainer" containerID="8efdbdd8706dea5dadf8befad13cade594bec8bf066f06f46da9654e61e46cc3" Nov 25 10:24:28 crc kubenswrapper[4932]: I1125 10:24:28.659563 4932 scope.go:117] "RemoveContainer" containerID="c848052ae0a45f3660e64a283df9ade815fc030f88eb47dab9786da6a49b2eea" Nov 25 10:24:28 crc kubenswrapper[4932]: I1125 10:24:28.665280 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-55b997df9d-b7dnw"] Nov 25 10:24:28 crc kubenswrapper[4932]: I1125 10:24:28.674378 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-55b997df9d-b7dnw"] Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.097667 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.125941 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmntc\" (UniqueName: \"kubernetes.io/projected/62faf475-5bd6-44d5-91fb-1a423ba35af2-kube-api-access-mmntc\") pod \"62faf475-5bd6-44d5-91fb-1a423ba35af2\" (UID: \"62faf475-5bd6-44d5-91fb-1a423ba35af2\") " Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.126006 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62faf475-5bd6-44d5-91fb-1a423ba35af2-catalog-content\") pod \"62faf475-5bd6-44d5-91fb-1a423ba35af2\" (UID: \"62faf475-5bd6-44d5-91fb-1a423ba35af2\") " Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.126031 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62faf475-5bd6-44d5-91fb-1a423ba35af2-utilities\") pod \"62faf475-5bd6-44d5-91fb-1a423ba35af2\" (UID: \"62faf475-5bd6-44d5-91fb-1a423ba35af2\") " Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.127090 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62faf475-5bd6-44d5-91fb-1a423ba35af2-utilities" (OuterVolumeSpecName: "utilities") pod "62faf475-5bd6-44d5-91fb-1a423ba35af2" (UID: "62faf475-5bd6-44d5-91fb-1a423ba35af2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.130505 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62faf475-5bd6-44d5-91fb-1a423ba35af2-kube-api-access-mmntc" (OuterVolumeSpecName: "kube-api-access-mmntc") pod "62faf475-5bd6-44d5-91fb-1a423ba35af2" (UID: "62faf475-5bd6-44d5-91fb-1a423ba35af2"). InnerVolumeSpecName "kube-api-access-mmntc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.180212 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62faf475-5bd6-44d5-91fb-1a423ba35af2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "62faf475-5bd6-44d5-91fb-1a423ba35af2" (UID: "62faf475-5bd6-44d5-91fb-1a423ba35af2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.227857 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmntc\" (UniqueName: \"kubernetes.io/projected/62faf475-5bd6-44d5-91fb-1a423ba35af2-kube-api-access-mmntc\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.227900 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62faf475-5bd6-44d5-91fb-1a423ba35af2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.227914 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62faf475-5bd6-44d5-91fb-1a423ba35af2-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.640108 4932 generic.go:334] "Generic (PLEG): container finished" podID="62faf475-5bd6-44d5-91fb-1a423ba35af2" containerID="90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67" exitCode=0 Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.640174 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v482s" event={"ID":"62faf475-5bd6-44d5-91fb-1a423ba35af2","Type":"ContainerDied","Data":"90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67"} Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.640222 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-v482s" event={"ID":"62faf475-5bd6-44d5-91fb-1a423ba35af2","Type":"ContainerDied","Data":"7e9b74d22af8be635d7257ff0043b095b06e9dd53ef2d379607f682bb3a625f7"} Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.640242 4932 scope.go:117] "RemoveContainer" containerID="90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.640351 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-v482s" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.684603 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-v482s"] Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.687490 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-v482s"] Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.691514 4932 scope.go:117] "RemoveContainer" containerID="08ad62c4b1fcf29adfcd8a048e22362a2f7ae064843da922c374737be8fa6b80" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.732059 4932 scope.go:117] "RemoveContainer" containerID="408d67258803cd364c76a663662d89cf4169ddd79d403181a2a7b66a2fb83588" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.768414 4932 scope.go:117] "RemoveContainer" containerID="90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67" Nov 25 10:24:29 crc kubenswrapper[4932]: E1125 10:24:29.769638 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67\": container with ID starting with 90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67 not found: ID does not exist" containerID="90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.769694 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67"} err="failed to get container status \"90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67\": rpc error: code = NotFound desc = could not find container \"90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67\": container with ID starting with 90e7b9479c6455b56dda8fc920242183dac86920a3e42b606e1358280f85dd67 not found: ID does not exist" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.769743 4932 scope.go:117] "RemoveContainer" containerID="08ad62c4b1fcf29adfcd8a048e22362a2f7ae064843da922c374737be8fa6b80" Nov 25 10:24:29 crc kubenswrapper[4932]: E1125 10:24:29.770895 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08ad62c4b1fcf29adfcd8a048e22362a2f7ae064843da922c374737be8fa6b80\": container with ID starting with 08ad62c4b1fcf29adfcd8a048e22362a2f7ae064843da922c374737be8fa6b80 not found: ID does not exist" containerID="08ad62c4b1fcf29adfcd8a048e22362a2f7ae064843da922c374737be8fa6b80" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.770934 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08ad62c4b1fcf29adfcd8a048e22362a2f7ae064843da922c374737be8fa6b80"} err="failed to get container status \"08ad62c4b1fcf29adfcd8a048e22362a2f7ae064843da922c374737be8fa6b80\": rpc error: code = NotFound desc = could not find container \"08ad62c4b1fcf29adfcd8a048e22362a2f7ae064843da922c374737be8fa6b80\": container with ID starting with 08ad62c4b1fcf29adfcd8a048e22362a2f7ae064843da922c374737be8fa6b80 not found: ID does not exist" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.770974 4932 scope.go:117] "RemoveContainer" containerID="408d67258803cd364c76a663662d89cf4169ddd79d403181a2a7b66a2fb83588" Nov 25 10:24:29 crc kubenswrapper[4932]: E1125 10:24:29.782319 4932 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"408d67258803cd364c76a663662d89cf4169ddd79d403181a2a7b66a2fb83588\": container with ID starting with 408d67258803cd364c76a663662d89cf4169ddd79d403181a2a7b66a2fb83588 not found: ID does not exist" containerID="408d67258803cd364c76a663662d89cf4169ddd79d403181a2a7b66a2fb83588" Nov 25 10:24:29 crc kubenswrapper[4932]: I1125 10:24:29.782395 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"408d67258803cd364c76a663662d89cf4169ddd79d403181a2a7b66a2fb83588"} err="failed to get container status \"408d67258803cd364c76a663662d89cf4169ddd79d403181a2a7b66a2fb83588\": rpc error: code = NotFound desc = could not find container \"408d67258803cd364c76a663662d89cf4169ddd79d403181a2a7b66a2fb83588\": container with ID starting with 408d67258803cd364c76a663662d89cf4169ddd79d403181a2a7b66a2fb83588 not found: ID does not exist" Nov 25 10:24:30 crc kubenswrapper[4932]: I1125 10:24:30.628307 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ddad29c-71a5-49d6-8088-c5663fc33555" path="/var/lib/kubelet/pods/1ddad29c-71a5-49d6-8088-c5663fc33555/volumes" Nov 25 10:24:30 crc kubenswrapper[4932]: I1125 10:24:30.629757 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62faf475-5bd6-44d5-91fb-1a423ba35af2" path="/var/lib/kubelet/pods/62faf475-5bd6-44d5-91fb-1a423ba35af2/volumes" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.309352 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-zs9cx"] Nov 25 10:24:31 crc kubenswrapper[4932]: E1125 10:24:31.310718 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ddad29c-71a5-49d6-8088-c5663fc33555" containerName="proxy-server" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.310783 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ddad29c-71a5-49d6-8088-c5663fc33555" containerName="proxy-server" Nov 25 10:24:31 crc kubenswrapper[4932]: E1125 10:24:31.310830 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62faf475-5bd6-44d5-91fb-1a423ba35af2" containerName="extract-content" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.310902 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="62faf475-5bd6-44d5-91fb-1a423ba35af2" containerName="extract-content" Nov 25 10:24:31 crc kubenswrapper[4932]: E1125 10:24:31.310962 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82349d87-563d-4bb6-92f3-b795bb04b09b" containerName="init" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.311018 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="82349d87-563d-4bb6-92f3-b795bb04b09b" containerName="init" Nov 25 10:24:31 crc kubenswrapper[4932]: E1125 10:24:31.311087 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62faf475-5bd6-44d5-91fb-1a423ba35af2" containerName="registry-server" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.311148 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="62faf475-5bd6-44d5-91fb-1a423ba35af2" containerName="registry-server" Nov 25 10:24:31 crc kubenswrapper[4932]: E1125 10:24:31.311245 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ddad29c-71a5-49d6-8088-c5663fc33555" containerName="proxy-httpd" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.311317 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ddad29c-71a5-49d6-8088-c5663fc33555" containerName="proxy-httpd" Nov 25 
10:24:31 crc kubenswrapper[4932]: E1125 10:24:31.311379 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82349d87-563d-4bb6-92f3-b795bb04b09b" containerName="dnsmasq-dns" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.311425 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="82349d87-563d-4bb6-92f3-b795bb04b09b" containerName="dnsmasq-dns" Nov 25 10:24:31 crc kubenswrapper[4932]: E1125 10:24:31.311506 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62faf475-5bd6-44d5-91fb-1a423ba35af2" containerName="extract-utilities" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.311566 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="62faf475-5bd6-44d5-91fb-1a423ba35af2" containerName="extract-utilities" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.311776 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ddad29c-71a5-49d6-8088-c5663fc33555" containerName="proxy-server" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.311840 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="62faf475-5bd6-44d5-91fb-1a423ba35af2" containerName="registry-server" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.311895 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="82349d87-563d-4bb6-92f3-b795bb04b09b" containerName="dnsmasq-dns" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.311938 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ddad29c-71a5-49d6-8088-c5663fc33555" containerName="proxy-httpd" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.312579 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-zs9cx" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.320109 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-zs9cx"] Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.361942 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8vnt\" (UniqueName: \"kubernetes.io/projected/365361a1-846d-43f6-9423-e09278bb603b-kube-api-access-z8vnt\") pod \"cinder-db-create-zs9cx\" (UID: \"365361a1-846d-43f6-9423-e09278bb603b\") " pod="openstack/cinder-db-create-zs9cx" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.362052 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/365361a1-846d-43f6-9423-e09278bb603b-operator-scripts\") pod \"cinder-db-create-zs9cx\" (UID: \"365361a1-846d-43f6-9423-e09278bb603b\") " pod="openstack/cinder-db-create-zs9cx" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.414664 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-2beb-account-create-f77ql"] Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.415817 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-2beb-account-create-f77ql" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.417726 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.426307 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-2beb-account-create-f77ql"] Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.464144 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/365361a1-846d-43f6-9423-e09278bb603b-operator-scripts\") pod \"cinder-db-create-zs9cx\" (UID: \"365361a1-846d-43f6-9423-e09278bb603b\") " pod="openstack/cinder-db-create-zs9cx" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.464587 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8vnt\" (UniqueName: \"kubernetes.io/projected/365361a1-846d-43f6-9423-e09278bb603b-kube-api-access-z8vnt\") pod \"cinder-db-create-zs9cx\" (UID: \"365361a1-846d-43f6-9423-e09278bb603b\") " pod="openstack/cinder-db-create-zs9cx" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.465826 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/365361a1-846d-43f6-9423-e09278bb603b-operator-scripts\") pod \"cinder-db-create-zs9cx\" (UID: \"365361a1-846d-43f6-9423-e09278bb603b\") " pod="openstack/cinder-db-create-zs9cx" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.485470 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8vnt\" (UniqueName: \"kubernetes.io/projected/365361a1-846d-43f6-9423-e09278bb603b-kube-api-access-z8vnt\") pod \"cinder-db-create-zs9cx\" (UID: \"365361a1-846d-43f6-9423-e09278bb603b\") " pod="openstack/cinder-db-create-zs9cx" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.566674 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8bc4a14-5db6-46c1-88e9-448799276e2e-operator-scripts\") pod \"cinder-2beb-account-create-f77ql\" (UID: \"e8bc4a14-5db6-46c1-88e9-448799276e2e\") " pod="openstack/cinder-2beb-account-create-f77ql" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.567123 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqwfv\" (UniqueName: \"kubernetes.io/projected/e8bc4a14-5db6-46c1-88e9-448799276e2e-kube-api-access-bqwfv\") pod \"cinder-2beb-account-create-f77ql\" (UID: \"e8bc4a14-5db6-46c1-88e9-448799276e2e\") " pod="openstack/cinder-2beb-account-create-f77ql" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.629452 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-zs9cx" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.669054 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8bc4a14-5db6-46c1-88e9-448799276e2e-operator-scripts\") pod \"cinder-2beb-account-create-f77ql\" (UID: \"e8bc4a14-5db6-46c1-88e9-448799276e2e\") " pod="openstack/cinder-2beb-account-create-f77ql" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.669347 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqwfv\" (UniqueName: \"kubernetes.io/projected/e8bc4a14-5db6-46c1-88e9-448799276e2e-kube-api-access-bqwfv\") pod \"cinder-2beb-account-create-f77ql\" (UID: \"e8bc4a14-5db6-46c1-88e9-448799276e2e\") " pod="openstack/cinder-2beb-account-create-f77ql" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.670362 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8bc4a14-5db6-46c1-88e9-448799276e2e-operator-scripts\") pod \"cinder-2beb-account-create-f77ql\" (UID: \"e8bc4a14-5db6-46c1-88e9-448799276e2e\") " pod="openstack/cinder-2beb-account-create-f77ql" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.687274 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqwfv\" (UniqueName: \"kubernetes.io/projected/e8bc4a14-5db6-46c1-88e9-448799276e2e-kube-api-access-bqwfv\") pod \"cinder-2beb-account-create-f77ql\" (UID: \"e8bc4a14-5db6-46c1-88e9-448799276e2e\") " pod="openstack/cinder-2beb-account-create-f77ql" Nov 25 10:24:31 crc kubenswrapper[4932]: I1125 10:24:31.734380 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-2beb-account-create-f77ql" Nov 25 10:24:32 crc kubenswrapper[4932]: I1125 10:24:32.098566 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-zs9cx"] Nov 25 10:24:32 crc kubenswrapper[4932]: W1125 10:24:32.106109 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod365361a1_846d_43f6_9423_e09278bb603b.slice/crio-ad493379c96987250116a94445c0553073806ffe45b9cfd59a63872818b71edd WatchSource:0}: Error finding container ad493379c96987250116a94445c0553073806ffe45b9cfd59a63872818b71edd: Status 404 returned error can't find the container with id ad493379c96987250116a94445c0553073806ffe45b9cfd59a63872818b71edd Nov 25 10:24:32 crc kubenswrapper[4932]: I1125 10:24:32.237724 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-2beb-account-create-f77ql"] Nov 25 10:24:32 crc kubenswrapper[4932]: W1125 10:24:32.240146 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8bc4a14_5db6_46c1_88e9_448799276e2e.slice/crio-75f86146611fbff523c018c50f7d86e17a767e6e7821bd398dc481fd1f8a00a1 WatchSource:0}: Error finding container 75f86146611fbff523c018c50f7d86e17a767e6e7821bd398dc481fd1f8a00a1: Status 404 returned error can't find the container with id 75f86146611fbff523c018c50f7d86e17a767e6e7821bd398dc481fd1f8a00a1 Nov 25 10:24:32 crc kubenswrapper[4932]: I1125 10:24:32.695339 4932 generic.go:334] "Generic (PLEG): container finished" podID="365361a1-846d-43f6-9423-e09278bb603b" containerID="cac2c89ed9e7906ab866e883a38db59eb2d2e0716dc7661b0897b4db7f840df5" exitCode=0 Nov 25 10:24:32 crc kubenswrapper[4932]: I1125 10:24:32.695470 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-zs9cx" event={"ID":"365361a1-846d-43f6-9423-e09278bb603b","Type":"ContainerDied","Data":"cac2c89ed9e7906ab866e883a38db59eb2d2e0716dc7661b0897b4db7f840df5"} Nov 25 10:24:32 crc kubenswrapper[4932]: I1125 10:24:32.695985 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-zs9cx" event={"ID":"365361a1-846d-43f6-9423-e09278bb603b","Type":"ContainerStarted","Data":"ad493379c96987250116a94445c0553073806ffe45b9cfd59a63872818b71edd"} Nov 25 10:24:32 crc kubenswrapper[4932]: I1125 10:24:32.699756 4932 generic.go:334] "Generic (PLEG): container finished" podID="e8bc4a14-5db6-46c1-88e9-448799276e2e" containerID="cd97c76aa1c46dbbc0948e04e2b05305c6fc14ed57dbf2f5d5635dd00e7eeb94" exitCode=0 Nov 25 10:24:32 crc kubenswrapper[4932]: I1125 10:24:32.699812 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-2beb-account-create-f77ql" event={"ID":"e8bc4a14-5db6-46c1-88e9-448799276e2e","Type":"ContainerDied","Data":"cd97c76aa1c46dbbc0948e04e2b05305c6fc14ed57dbf2f5d5635dd00e7eeb94"} Nov 25 10:24:32 crc kubenswrapper[4932]: I1125 10:24:32.699864 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-2beb-account-create-f77ql" event={"ID":"e8bc4a14-5db6-46c1-88e9-448799276e2e","Type":"ContainerStarted","Data":"75f86146611fbff523c018c50f7d86e17a767e6e7821bd398dc481fd1f8a00a1"} Nov 25 10:24:33 crc kubenswrapper[4932]: I1125 10:24:33.605588 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:24:33 crc kubenswrapper[4932]: E1125 10:24:33.605973 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.253168 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-2beb-account-create-f77ql" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.260771 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-zs9cx" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.419845 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqwfv\" (UniqueName: \"kubernetes.io/projected/e8bc4a14-5db6-46c1-88e9-448799276e2e-kube-api-access-bqwfv\") pod \"e8bc4a14-5db6-46c1-88e9-448799276e2e\" (UID: \"e8bc4a14-5db6-46c1-88e9-448799276e2e\") " Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.419914 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8vnt\" (UniqueName: \"kubernetes.io/projected/365361a1-846d-43f6-9423-e09278bb603b-kube-api-access-z8vnt\") pod \"365361a1-846d-43f6-9423-e09278bb603b\" (UID: \"365361a1-846d-43f6-9423-e09278bb603b\") " Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.420075 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8bc4a14-5db6-46c1-88e9-448799276e2e-operator-scripts\") pod \"e8bc4a14-5db6-46c1-88e9-448799276e2e\" (UID: \"e8bc4a14-5db6-46c1-88e9-448799276e2e\") " Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.420109 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/365361a1-846d-43f6-9423-e09278bb603b-operator-scripts\") pod \"365361a1-846d-43f6-9423-e09278bb603b\" (UID: \"365361a1-846d-43f6-9423-e09278bb603b\") " Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.421011 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8bc4a14-5db6-46c1-88e9-448799276e2e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e8bc4a14-5db6-46c1-88e9-448799276e2e" (UID: "e8bc4a14-5db6-46c1-88e9-448799276e2e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.421022 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/365361a1-846d-43f6-9423-e09278bb603b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "365361a1-846d-43f6-9423-e09278bb603b" (UID: "365361a1-846d-43f6-9423-e09278bb603b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.427201 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/365361a1-846d-43f6-9423-e09278bb603b-kube-api-access-z8vnt" (OuterVolumeSpecName: "kube-api-access-z8vnt") pod "365361a1-846d-43f6-9423-e09278bb603b" (UID: "365361a1-846d-43f6-9423-e09278bb603b"). InnerVolumeSpecName "kube-api-access-z8vnt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.430767 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8bc4a14-5db6-46c1-88e9-448799276e2e-kube-api-access-bqwfv" (OuterVolumeSpecName: "kube-api-access-bqwfv") pod "e8bc4a14-5db6-46c1-88e9-448799276e2e" (UID: "e8bc4a14-5db6-46c1-88e9-448799276e2e"). InnerVolumeSpecName "kube-api-access-bqwfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.522120 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8bc4a14-5db6-46c1-88e9-448799276e2e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.522163 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/365361a1-846d-43f6-9423-e09278bb603b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.522172 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqwfv\" (UniqueName: \"kubernetes.io/projected/e8bc4a14-5db6-46c1-88e9-448799276e2e-kube-api-access-bqwfv\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.522196 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8vnt\" (UniqueName: \"kubernetes.io/projected/365361a1-846d-43f6-9423-e09278bb603b-kube-api-access-z8vnt\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.717554 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-2beb-account-create-f77ql" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.717601 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-2beb-account-create-f77ql" event={"ID":"e8bc4a14-5db6-46c1-88e9-448799276e2e","Type":"ContainerDied","Data":"75f86146611fbff523c018c50f7d86e17a767e6e7821bd398dc481fd1f8a00a1"} Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.718253 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75f86146611fbff523c018c50f7d86e17a767e6e7821bd398dc481fd1f8a00a1" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.719271 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-zs9cx" event={"ID":"365361a1-846d-43f6-9423-e09278bb603b","Type":"ContainerDied","Data":"ad493379c96987250116a94445c0553073806ffe45b9cfd59a63872818b71edd"} Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.719300 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad493379c96987250116a94445c0553073806ffe45b9cfd59a63872818b71edd" Nov 25 10:24:34 crc kubenswrapper[4932]: I1125 10:24:34.719321 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-zs9cx" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.559677 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-mwtbt"] Nov 25 10:24:36 crc kubenswrapper[4932]: E1125 10:24:36.560431 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8bc4a14-5db6-46c1-88e9-448799276e2e" containerName="mariadb-account-create" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.560448 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8bc4a14-5db6-46c1-88e9-448799276e2e" containerName="mariadb-account-create" Nov 25 10:24:36 crc kubenswrapper[4932]: E1125 10:24:36.560509 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="365361a1-846d-43f6-9423-e09278bb603b" containerName="mariadb-database-create" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.560516 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="365361a1-846d-43f6-9423-e09278bb603b" containerName="mariadb-database-create" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.560694 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="365361a1-846d-43f6-9423-e09278bb603b" containerName="mariadb-database-create" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.560703 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8bc4a14-5db6-46c1-88e9-448799276e2e" containerName="mariadb-account-create" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.561456 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.563297 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.563424 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-sxl4j" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.563729 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.576579 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-mwtbt"] Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.659649 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-scripts\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.659739 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-db-sync-config-data\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.659913 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwl87\" (UniqueName: \"kubernetes.io/projected/5deeb987-1aff-4645-9018-492f7517dcc6-kube-api-access-nwl87\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.659972 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-config-data\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.660236 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5deeb987-1aff-4645-9018-492f7517dcc6-etc-machine-id\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.660294 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-combined-ca-bundle\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.761978 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5deeb987-1aff-4645-9018-492f7517dcc6-etc-machine-id\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.762044 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-combined-ca-bundle\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.762134 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-scripts\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.762130 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5deeb987-1aff-4645-9018-492f7517dcc6-etc-machine-id\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.762353 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-db-sync-config-data\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.762411 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwl87\" (UniqueName: \"kubernetes.io/projected/5deeb987-1aff-4645-9018-492f7517dcc6-kube-api-access-nwl87\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.762436 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-config-data\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.770798 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-scripts\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.773499 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-config-data\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.775424 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-db-sync-config-data\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.785331 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-combined-ca-bundle\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.789467 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwl87\" (UniqueName: \"kubernetes.io/projected/5deeb987-1aff-4645-9018-492f7517dcc6-kube-api-access-nwl87\") pod \"cinder-db-sync-mwtbt\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:36 crc kubenswrapper[4932]: I1125 10:24:36.884181 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:37 crc kubenswrapper[4932]: I1125 10:24:37.328629 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-mwtbt"] Nov 25 10:24:37 crc kubenswrapper[4932]: I1125 10:24:37.743223 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-mwtbt" event={"ID":"5deeb987-1aff-4645-9018-492f7517dcc6","Type":"ContainerStarted","Data":"17f731fc20d9717379edb5b2f1a4bd6b56150a183cb04d6771d6b7480594a0f1"} Nov 25 10:24:38 crc kubenswrapper[4932]: I1125 10:24:38.752996 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-mwtbt" event={"ID":"5deeb987-1aff-4645-9018-492f7517dcc6","Type":"ContainerStarted","Data":"05d101558d87b65a0935ca3040aae225d8b500b789164b830ff10b54c74cd860"} Nov 25 10:24:38 crc kubenswrapper[4932]: I1125 10:24:38.780902 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-mwtbt" podStartSLOduration=2.780863978 podStartE2EDuration="2.780863978s" podCreationTimestamp="2025-11-25 10:24:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:24:38.769932244 +0000 UTC m=+5738.895961817" watchObservedRunningTime="2025-11-25 10:24:38.780863978 +0000 UTC m=+5738.906893541" Nov 25 10:24:42 crc kubenswrapper[4932]: I1125 10:24:42.799849 4932 generic.go:334] "Generic (PLEG): container finished" podID="5deeb987-1aff-4645-9018-492f7517dcc6" containerID="05d101558d87b65a0935ca3040aae225d8b500b789164b830ff10b54c74cd860" exitCode=0 Nov 25 10:24:42 crc kubenswrapper[4932]: I1125 10:24:42.799955 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-mwtbt" event={"ID":"5deeb987-1aff-4645-9018-492f7517dcc6","Type":"ContainerDied","Data":"05d101558d87b65a0935ca3040aae225d8b500b789164b830ff10b54c74cd860"} Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.173470 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.307322 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5deeb987-1aff-4645-9018-492f7517dcc6-etc-machine-id\") pod \"5deeb987-1aff-4645-9018-492f7517dcc6\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.307718 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-scripts\") pod \"5deeb987-1aff-4645-9018-492f7517dcc6\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.307762 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-db-sync-config-data\") pod \"5deeb987-1aff-4645-9018-492f7517dcc6\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.307587 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5deeb987-1aff-4645-9018-492f7517dcc6-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5deeb987-1aff-4645-9018-492f7517dcc6" (UID: "5deeb987-1aff-4645-9018-492f7517dcc6"). 
InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.307813 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwl87\" (UniqueName: \"kubernetes.io/projected/5deeb987-1aff-4645-9018-492f7517dcc6-kube-api-access-nwl87\") pod \"5deeb987-1aff-4645-9018-492f7517dcc6\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.307836 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-config-data\") pod \"5deeb987-1aff-4645-9018-492f7517dcc6\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.308005 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-combined-ca-bundle\") pod \"5deeb987-1aff-4645-9018-492f7517dcc6\" (UID: \"5deeb987-1aff-4645-9018-492f7517dcc6\") " Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.308597 4932 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5deeb987-1aff-4645-9018-492f7517dcc6-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.314523 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "5deeb987-1aff-4645-9018-492f7517dcc6" (UID: "5deeb987-1aff-4645-9018-492f7517dcc6"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.314532 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5deeb987-1aff-4645-9018-492f7517dcc6-kube-api-access-nwl87" (OuterVolumeSpecName: "kube-api-access-nwl87") pod "5deeb987-1aff-4645-9018-492f7517dcc6" (UID: "5deeb987-1aff-4645-9018-492f7517dcc6"). InnerVolumeSpecName "kube-api-access-nwl87". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.315368 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-scripts" (OuterVolumeSpecName: "scripts") pod "5deeb987-1aff-4645-9018-492f7517dcc6" (UID: "5deeb987-1aff-4645-9018-492f7517dcc6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.338939 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5deeb987-1aff-4645-9018-492f7517dcc6" (UID: "5deeb987-1aff-4645-9018-492f7517dcc6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.360255 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-config-data" (OuterVolumeSpecName: "config-data") pod "5deeb987-1aff-4645-9018-492f7517dcc6" (UID: "5deeb987-1aff-4645-9018-492f7517dcc6"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.409716 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.409891 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.409967 4932 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.410054 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwl87\" (UniqueName: \"kubernetes.io/projected/5deeb987-1aff-4645-9018-492f7517dcc6-kube-api-access-nwl87\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.410108 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5deeb987-1aff-4645-9018-492f7517dcc6-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.817887 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-mwtbt" event={"ID":"5deeb987-1aff-4645-9018-492f7517dcc6","Type":"ContainerDied","Data":"17f731fc20d9717379edb5b2f1a4bd6b56150a183cb04d6771d6b7480594a0f1"} Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.817926 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="17f731fc20d9717379edb5b2f1a4bd6b56150a183cb04d6771d6b7480594a0f1" Nov 25 10:24:44 crc kubenswrapper[4932]: I1125 10:24:44.817926 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-mwtbt" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.176896 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c944859d7-d4x4f"] Nov 25 10:24:45 crc kubenswrapper[4932]: E1125 10:24:45.177341 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5deeb987-1aff-4645-9018-492f7517dcc6" containerName="cinder-db-sync" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.177357 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5deeb987-1aff-4645-9018-492f7517dcc6" containerName="cinder-db-sync" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.177608 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="5deeb987-1aff-4645-9018-492f7517dcc6" containerName="cinder-db-sync" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.178830 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.199553 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c944859d7-d4x4f"] Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.227082 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-config\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.227167 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-ovsdbserver-sb\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.227211 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffdpx\" (UniqueName: \"kubernetes.io/projected/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-kube-api-access-ffdpx\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.227243 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-ovsdbserver-nb\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.227281 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-dns-svc\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.329370 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-config\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.329951 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-ovsdbserver-sb\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.329975 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffdpx\" (UniqueName: \"kubernetes.io/projected/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-kube-api-access-ffdpx\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.330005 4932 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-ovsdbserver-nb\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.330041 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-dns-svc\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.330396 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-config\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.331158 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-dns-svc\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.331172 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-ovsdbserver-sb\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.329661 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.332163 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-ovsdbserver-nb\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.332850 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.335582 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-sxl4j" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.335761 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.337035 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.337633 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.344561 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.372865 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffdpx\" (UniqueName: \"kubernetes.io/projected/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-kube-api-access-ffdpx\") pod \"dnsmasq-dns-7c944859d7-d4x4f\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") " pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.431734 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.431784 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-config-data-custom\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.431818 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-scripts\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.431861 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/981c447d-14be-4e2f-8619-d8099dac746c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.431893 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7zl2\" (UniqueName: \"kubernetes.io/projected/981c447d-14be-4e2f-8619-d8099dac746c-kube-api-access-f7zl2\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.431915 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-config-data\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: 
I1125 10:24:45.431935 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/981c447d-14be-4e2f-8619-d8099dac746c-logs\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.503574 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.533458 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7zl2\" (UniqueName: \"kubernetes.io/projected/981c447d-14be-4e2f-8619-d8099dac746c-kube-api-access-f7zl2\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.533528 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-config-data\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.533562 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/981c447d-14be-4e2f-8619-d8099dac746c-logs\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.533672 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.533694 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-config-data-custom\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.533730 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-scripts\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.533779 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/981c447d-14be-4e2f-8619-d8099dac746c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.533872 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/981c447d-14be-4e2f-8619-d8099dac746c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.540514 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-config-data-custom\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.540660 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-config-data\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.540776 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/981c447d-14be-4e2f-8619-d8099dac746c-logs\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.541282 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.541437 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-scripts\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.561799 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7zl2\" (UniqueName: \"kubernetes.io/projected/981c447d-14be-4e2f-8619-d8099dac746c-kube-api-access-f7zl2\") pod \"cinder-api-0\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " pod="openstack/cinder-api-0" Nov 25 10:24:45 crc kubenswrapper[4932]: I1125 10:24:45.653423 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:24:46 crc kubenswrapper[4932]: I1125 10:24:46.213018 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c944859d7-d4x4f"] Nov 25 10:24:46 crc kubenswrapper[4932]: I1125 10:24:46.388366 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:24:46 crc kubenswrapper[4932]: I1125 10:24:46.874960 4932 generic.go:334] "Generic (PLEG): container finished" podID="bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" containerID="5d02cff61ad6bfecad1b394b204bb55f6f05e041ab27663c230abac75231275d" exitCode=0 Nov 25 10:24:46 crc kubenswrapper[4932]: I1125 10:24:46.875090 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" event={"ID":"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b","Type":"ContainerDied","Data":"5d02cff61ad6bfecad1b394b204bb55f6f05e041ab27663c230abac75231275d"} Nov 25 10:24:46 crc kubenswrapper[4932]: I1125 10:24:46.875693 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" event={"ID":"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b","Type":"ContainerStarted","Data":"a0fe21c29a68090f0ecb006fbe90b05e8d6e1e59687027c16c592ae7ac55130a"} Nov 25 10:24:46 crc kubenswrapper[4932]: I1125 10:24:46.882028 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"981c447d-14be-4e2f-8619-d8099dac746c","Type":"ContainerStarted","Data":"46966d8f8b8d06208d69141de9421e050fa9d0f808d7d3e29b4ba9cf8c75e9ec"} Nov 25 10:24:47 crc kubenswrapper[4932]: I1125 10:24:47.608112 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:24:47 crc kubenswrapper[4932]: E1125 10:24:47.608452 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:24:47 crc kubenswrapper[4932]: I1125 10:24:47.844429 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:24:47 crc kubenswrapper[4932]: I1125 10:24:47.891121 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" event={"ID":"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b","Type":"ContainerStarted","Data":"2ed2ab6ea243326a881b722ff99d48088ac86a085409b6cb43f4f7c02dc6fea0"} Nov 25 10:24:47 crc kubenswrapper[4932]: I1125 10:24:47.892357 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:47 crc kubenswrapper[4932]: I1125 10:24:47.894239 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"981c447d-14be-4e2f-8619-d8099dac746c","Type":"ContainerStarted","Data":"5a11a407adb7e89b8d84f908cafed91b4d6cf316458eaff9474ea6d2aad9c19d"} Nov 25 10:24:47 crc kubenswrapper[4932]: I1125 10:24:47.914965 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" podStartSLOduration=2.91494549 podStartE2EDuration="2.91494549s" podCreationTimestamp="2025-11-25 10:24:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 
+0000 UTC" observedRunningTime="2025-11-25 10:24:47.911564903 +0000 UTC m=+5748.037594476" watchObservedRunningTime="2025-11-25 10:24:47.91494549 +0000 UTC m=+5748.040975043" Nov 25 10:24:48 crc kubenswrapper[4932]: I1125 10:24:48.907302 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="981c447d-14be-4e2f-8619-d8099dac746c" containerName="cinder-api-log" containerID="cri-o://5a11a407adb7e89b8d84f908cafed91b4d6cf316458eaff9474ea6d2aad9c19d" gracePeriod=30 Nov 25 10:24:48 crc kubenswrapper[4932]: I1125 10:24:48.907549 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"981c447d-14be-4e2f-8619-d8099dac746c","Type":"ContainerStarted","Data":"9325c0c0148a0198729707186188bdc077ecde067f06d69bfb87af20d6b086e3"} Nov 25 10:24:48 crc kubenswrapper[4932]: I1125 10:24:48.907586 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 10:24:48 crc kubenswrapper[4932]: I1125 10:24:48.907787 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="981c447d-14be-4e2f-8619-d8099dac746c" containerName="cinder-api" containerID="cri-o://9325c0c0148a0198729707186188bdc077ecde067f06d69bfb87af20d6b086e3" gracePeriod=30 Nov 25 10:24:48 crc kubenswrapper[4932]: I1125 10:24:48.933895 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.933877508 podStartE2EDuration="3.933877508s" podCreationTimestamp="2025-11-25 10:24:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:24:48.925974892 +0000 UTC m=+5749.052004465" watchObservedRunningTime="2025-11-25 10:24:48.933877508 +0000 UTC m=+5749.059907071" Nov 25 10:24:49 crc kubenswrapper[4932]: I1125 10:24:49.920578 4932 generic.go:334] "Generic (PLEG): container finished" podID="981c447d-14be-4e2f-8619-d8099dac746c" containerID="9325c0c0148a0198729707186188bdc077ecde067f06d69bfb87af20d6b086e3" exitCode=0 Nov 25 10:24:49 crc kubenswrapper[4932]: I1125 10:24:49.921069 4932 generic.go:334] "Generic (PLEG): container finished" podID="981c447d-14be-4e2f-8619-d8099dac746c" containerID="5a11a407adb7e89b8d84f908cafed91b4d6cf316458eaff9474ea6d2aad9c19d" exitCode=143 Nov 25 10:24:49 crc kubenswrapper[4932]: I1125 10:24:49.920720 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"981c447d-14be-4e2f-8619-d8099dac746c","Type":"ContainerDied","Data":"9325c0c0148a0198729707186188bdc077ecde067f06d69bfb87af20d6b086e3"} Nov 25 10:24:49 crc kubenswrapper[4932]: I1125 10:24:49.922055 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"981c447d-14be-4e2f-8619-d8099dac746c","Type":"ContainerDied","Data":"5a11a407adb7e89b8d84f908cafed91b4d6cf316458eaff9474ea6d2aad9c19d"} Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.066068 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.139956 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-scripts\") pod \"981c447d-14be-4e2f-8619-d8099dac746c\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.140061 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/981c447d-14be-4e2f-8619-d8099dac746c-etc-machine-id\") pod \"981c447d-14be-4e2f-8619-d8099dac746c\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.140110 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-config-data\") pod \"981c447d-14be-4e2f-8619-d8099dac746c\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.140179 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-config-data-custom\") pod \"981c447d-14be-4e2f-8619-d8099dac746c\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.140244 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-combined-ca-bundle\") pod \"981c447d-14be-4e2f-8619-d8099dac746c\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.140282 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7zl2\" (UniqueName: \"kubernetes.io/projected/981c447d-14be-4e2f-8619-d8099dac746c-kube-api-access-f7zl2\") pod \"981c447d-14be-4e2f-8619-d8099dac746c\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.140307 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/981c447d-14be-4e2f-8619-d8099dac746c-logs\") pod \"981c447d-14be-4e2f-8619-d8099dac746c\" (UID: \"981c447d-14be-4e2f-8619-d8099dac746c\") " Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.140539 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/981c447d-14be-4e2f-8619-d8099dac746c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "981c447d-14be-4e2f-8619-d8099dac746c" (UID: "981c447d-14be-4e2f-8619-d8099dac746c"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.140780 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/981c447d-14be-4e2f-8619-d8099dac746c-logs" (OuterVolumeSpecName: "logs") pod "981c447d-14be-4e2f-8619-d8099dac746c" (UID: "981c447d-14be-4e2f-8619-d8099dac746c"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.141163 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/981c447d-14be-4e2f-8619-d8099dac746c-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.141372 4932 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/981c447d-14be-4e2f-8619-d8099dac746c-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.147529 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/981c447d-14be-4e2f-8619-d8099dac746c-kube-api-access-f7zl2" (OuterVolumeSpecName: "kube-api-access-f7zl2") pod "981c447d-14be-4e2f-8619-d8099dac746c" (UID: "981c447d-14be-4e2f-8619-d8099dac746c"). InnerVolumeSpecName "kube-api-access-f7zl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.147634 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-scripts" (OuterVolumeSpecName: "scripts") pod "981c447d-14be-4e2f-8619-d8099dac746c" (UID: "981c447d-14be-4e2f-8619-d8099dac746c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.148589 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "981c447d-14be-4e2f-8619-d8099dac746c" (UID: "981c447d-14be-4e2f-8619-d8099dac746c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.179597 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "981c447d-14be-4e2f-8619-d8099dac746c" (UID: "981c447d-14be-4e2f-8619-d8099dac746c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.199991 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-config-data" (OuterVolumeSpecName: "config-data") pod "981c447d-14be-4e2f-8619-d8099dac746c" (UID: "981c447d-14be-4e2f-8619-d8099dac746c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.244063 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.244129 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.244140 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.244154 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/981c447d-14be-4e2f-8619-d8099dac746c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.244164 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7zl2\" (UniqueName: \"kubernetes.io/projected/981c447d-14be-4e2f-8619-d8099dac746c-kube-api-access-f7zl2\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.932649 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"981c447d-14be-4e2f-8619-d8099dac746c","Type":"ContainerDied","Data":"46966d8f8b8d06208d69141de9421e050fa9d0f808d7d3e29b4ba9cf8c75e9ec"} Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.933262 4932 scope.go:117] "RemoveContainer" containerID="9325c0c0148a0198729707186188bdc077ecde067f06d69bfb87af20d6b086e3" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.932744 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.960695 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.961529 4932 scope.go:117] "RemoveContainer" containerID="5a11a407adb7e89b8d84f908cafed91b4d6cf316458eaff9474ea6d2aad9c19d" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.969984 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.989045 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:24:50 crc kubenswrapper[4932]: E1125 10:24:50.989565 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="981c447d-14be-4e2f-8619-d8099dac746c" containerName="cinder-api-log" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.989590 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="981c447d-14be-4e2f-8619-d8099dac746c" containerName="cinder-api-log" Nov 25 10:24:50 crc kubenswrapper[4932]: E1125 10:24:50.989605 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="981c447d-14be-4e2f-8619-d8099dac746c" containerName="cinder-api" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.989614 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="981c447d-14be-4e2f-8619-d8099dac746c" containerName="cinder-api" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.989837 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="981c447d-14be-4e2f-8619-d8099dac746c" containerName="cinder-api-log" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.989865 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="981c447d-14be-4e2f-8619-d8099dac746c" containerName="cinder-api" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.992299 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.996335 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.996381 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-sxl4j" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.996482 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.996576 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 10:24:50 crc kubenswrapper[4932]: I1125 10:24:50.998477 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.000266 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.004393 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.060293 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.060429 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-scripts\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.060468 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8ab2177-5150-40f7-9366-e1425040ce69-logs\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.060504 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-public-tls-certs\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.060573 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.060682 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-config-data\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.060726 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jg7gw\" (UniqueName: \"kubernetes.io/projected/d8ab2177-5150-40f7-9366-e1425040ce69-kube-api-access-jg7gw\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.060767 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-config-data-custom\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.060801 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d8ab2177-5150-40f7-9366-e1425040ce69-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.163243 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.163634 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-scripts\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.163721 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8ab2177-5150-40f7-9366-e1425040ce69-logs\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.163826 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-public-tls-certs\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.163952 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.164071 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-config-data\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.164185 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jg7gw\" (UniqueName: \"kubernetes.io/projected/d8ab2177-5150-40f7-9366-e1425040ce69-kube-api-access-jg7gw\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc 
kubenswrapper[4932]: I1125 10:24:51.164318 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-config-data-custom\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.164426 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d8ab2177-5150-40f7-9366-e1425040ce69-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.164527 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8ab2177-5150-40f7-9366-e1425040ce69-logs\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.164646 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d8ab2177-5150-40f7-9366-e1425040ce69-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.168862 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-public-tls-certs\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.168872 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-config-data-custom\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.168916 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.169445 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-config-data\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.169610 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-scripts\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.170339 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.181856 4932 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jg7gw\" (UniqueName: \"kubernetes.io/projected/d8ab2177-5150-40f7-9366-e1425040ce69-kube-api-access-jg7gw\") pod \"cinder-api-0\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.323161 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.887724 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:24:51 crc kubenswrapper[4932]: W1125 10:24:51.893961 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8ab2177_5150_40f7_9366_e1425040ce69.slice/crio-52e24a93a9a734d9b13851391568299ca7ed07e4491d1e03fbf739edf6ffd719 WatchSource:0}: Error finding container 52e24a93a9a734d9b13851391568299ca7ed07e4491d1e03fbf739edf6ffd719: Status 404 returned error can't find the container with id 52e24a93a9a734d9b13851391568299ca7ed07e4491d1e03fbf739edf6ffd719 Nov 25 10:24:51 crc kubenswrapper[4932]: I1125 10:24:51.957478 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d8ab2177-5150-40f7-9366-e1425040ce69","Type":"ContainerStarted","Data":"52e24a93a9a734d9b13851391568299ca7ed07e4491d1e03fbf739edf6ffd719"} Nov 25 10:24:52 crc kubenswrapper[4932]: I1125 10:24:52.624593 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="981c447d-14be-4e2f-8619-d8099dac746c" path="/var/lib/kubelet/pods/981c447d-14be-4e2f-8619-d8099dac746c/volumes" Nov 25 10:24:52 crc kubenswrapper[4932]: I1125 10:24:52.976394 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d8ab2177-5150-40f7-9366-e1425040ce69","Type":"ContainerStarted","Data":"a360545592b548d717071408aba0985a6476b7ccbada89c7d8dfadb1746851b7"} Nov 25 10:24:53 crc kubenswrapper[4932]: I1125 10:24:53.988848 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d8ab2177-5150-40f7-9366-e1425040ce69","Type":"ContainerStarted","Data":"918dd49aa6de686c5555052f51529c6d93e4a1ad9eb180e7878347ab6300c76f"} Nov 25 10:24:53 crc kubenswrapper[4932]: I1125 10:24:53.989449 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 10:24:54 crc kubenswrapper[4932]: I1125 10:24:54.019099 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.019072096 podStartE2EDuration="4.019072096s" podCreationTimestamp="2025-11-25 10:24:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:24:54.006373812 +0000 UTC m=+5754.132403385" watchObservedRunningTime="2025-11-25 10:24:54.019072096 +0000 UTC m=+5754.145101659" Nov 25 10:24:55 crc kubenswrapper[4932]: I1125 10:24:55.506428 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" Nov 25 10:24:55 crc kubenswrapper[4932]: I1125 10:24:55.563222 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c46f9d5bf-gx5n6"] Nov 25 10:24:55 crc kubenswrapper[4932]: I1125 10:24:55.563756 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" 
podUID="a6d6e550-47c7-40fb-84de-4603f403720d" containerName="dnsmasq-dns" containerID="cri-o://fc008c454e853da5bee0263aa2bbf89684bb8e828a3d5ef25066d9a778ff45f0" gracePeriod=10 Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.016362 4932 generic.go:334] "Generic (PLEG): container finished" podID="a6d6e550-47c7-40fb-84de-4603f403720d" containerID="fc008c454e853da5bee0263aa2bbf89684bb8e828a3d5ef25066d9a778ff45f0" exitCode=0 Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.017000 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" event={"ID":"a6d6e550-47c7-40fb-84de-4603f403720d","Type":"ContainerDied","Data":"fc008c454e853da5bee0263aa2bbf89684bb8e828a3d5ef25066d9a778ff45f0"} Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.017074 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" event={"ID":"a6d6e550-47c7-40fb-84de-4603f403720d","Type":"ContainerDied","Data":"c5c678b7eec5329a2700edd7bb632ead28e5bcca85bf097f687fc1cf9e91776c"} Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.017149 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5c678b7eec5329a2700edd7bb632ead28e5bcca85bf097f687fc1cf9e91776c" Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.102058 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.159905 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-dns-svc\") pod \"a6d6e550-47c7-40fb-84de-4603f403720d\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.159993 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-ovsdbserver-nb\") pod \"a6d6e550-47c7-40fb-84de-4603f403720d\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.160113 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-config\") pod \"a6d6e550-47c7-40fb-84de-4603f403720d\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.160322 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kp8qb\" (UniqueName: \"kubernetes.io/projected/a6d6e550-47c7-40fb-84de-4603f403720d-kube-api-access-kp8qb\") pod \"a6d6e550-47c7-40fb-84de-4603f403720d\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.160439 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-ovsdbserver-sb\") pod \"a6d6e550-47c7-40fb-84de-4603f403720d\" (UID: \"a6d6e550-47c7-40fb-84de-4603f403720d\") " Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.170106 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6d6e550-47c7-40fb-84de-4603f403720d-kube-api-access-kp8qb" (OuterVolumeSpecName: "kube-api-access-kp8qb") pod "a6d6e550-47c7-40fb-84de-4603f403720d" (UID: 
"a6d6e550-47c7-40fb-84de-4603f403720d"). InnerVolumeSpecName "kube-api-access-kp8qb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.230370 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a6d6e550-47c7-40fb-84de-4603f403720d" (UID: "a6d6e550-47c7-40fb-84de-4603f403720d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.234293 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a6d6e550-47c7-40fb-84de-4603f403720d" (UID: "a6d6e550-47c7-40fb-84de-4603f403720d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.237107 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a6d6e550-47c7-40fb-84de-4603f403720d" (UID: "a6d6e550-47c7-40fb-84de-4603f403720d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.239403 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-config" (OuterVolumeSpecName: "config") pod "a6d6e550-47c7-40fb-84de-4603f403720d" (UID: "a6d6e550-47c7-40fb-84de-4603f403720d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.262664 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kp8qb\" (UniqueName: \"kubernetes.io/projected/a6d6e550-47c7-40fb-84de-4603f403720d-kube-api-access-kp8qb\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.262709 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.262720 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.262729 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:56 crc kubenswrapper[4932]: I1125 10:24:56.262738 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6d6e550-47c7-40fb-84de-4603f403720d-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:24:57 crc kubenswrapper[4932]: I1125 10:24:57.026565 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" Nov 25 10:24:57 crc kubenswrapper[4932]: I1125 10:24:57.049639 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c46f9d5bf-gx5n6"] Nov 25 10:24:57 crc kubenswrapper[4932]: I1125 10:24:57.056082 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c46f9d5bf-gx5n6"] Nov 25 10:24:58 crc kubenswrapper[4932]: I1125 10:24:58.606743 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:24:58 crc kubenswrapper[4932]: E1125 10:24:58.607062 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:24:58 crc kubenswrapper[4932]: I1125 10:24:58.616040 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6d6e550-47c7-40fb-84de-4603f403720d" path="/var/lib/kubelet/pods/a6d6e550-47c7-40fb-84de-4603f403720d/volumes" Nov 25 10:25:00 crc kubenswrapper[4932]: I1125 10:25:00.873469 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6c46f9d5bf-gx5n6" podUID="a6d6e550-47c7-40fb-84de-4603f403720d" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.50:5353: i/o timeout" Nov 25 10:25:03 crc kubenswrapper[4932]: I1125 10:25:03.507093 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 10:25:09 crc kubenswrapper[4932]: I1125 10:25:09.646242 4932 scope.go:117] "RemoveContainer" containerID="6eabe436fa2bc424ca4a5638346fb7571990122e9d64fda0b7fedc0837baa3a0" Nov 25 10:25:11 crc kubenswrapper[4932]: I1125 10:25:11.606082 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:25:11 crc kubenswrapper[4932]: E1125 10:25:11.606947 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:25:21 crc kubenswrapper[4932]: I1125 10:25:21.987388 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:25:21 crc kubenswrapper[4932]: E1125 10:25:21.988402 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6d6e550-47c7-40fb-84de-4603f403720d" containerName="init" Nov 25 10:25:21 crc kubenswrapper[4932]: I1125 10:25:21.988421 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6d6e550-47c7-40fb-84de-4603f403720d" containerName="init" Nov 25 10:25:21 crc kubenswrapper[4932]: E1125 10:25:21.988459 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6d6e550-47c7-40fb-84de-4603f403720d" containerName="dnsmasq-dns" Nov 25 10:25:21 crc kubenswrapper[4932]: I1125 10:25:21.988468 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6d6e550-47c7-40fb-84de-4603f403720d" 
containerName="dnsmasq-dns" Nov 25 10:25:21 crc kubenswrapper[4932]: I1125 10:25:21.988689 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6d6e550-47c7-40fb-84de-4603f403720d" containerName="dnsmasq-dns" Nov 25 10:25:21 crc kubenswrapper[4932]: I1125 10:25:21.989766 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 10:25:21 crc kubenswrapper[4932]: I1125 10:25:21.992967 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.006399 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.098366 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xldpn\" (UniqueName: \"kubernetes.io/projected/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-kube-api-access-xldpn\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.098458 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.098478 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-config-data\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.098502 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.098751 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.098805 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-scripts\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.199910 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.199957 4932 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-config-data\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.199987 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.200101 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.200124 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-scripts\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.200166 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xldpn\" (UniqueName: \"kubernetes.io/projected/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-kube-api-access-xldpn\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.200204 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.206281 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-scripts\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.207218 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.207304 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-config-data\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.207668 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 
10:25:22.223057 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xldpn\" (UniqueName: \"kubernetes.io/projected/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-kube-api-access-xldpn\") pod \"cinder-scheduler-0\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.328849 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.609539 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:25:22 crc kubenswrapper[4932]: E1125 10:25:22.610135 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:25:22 crc kubenswrapper[4932]: I1125 10:25:22.841306 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:25:23 crc kubenswrapper[4932]: I1125 10:25:23.259115 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e","Type":"ContainerStarted","Data":"253721d63db59a9e126367f6114d5915f69291d29ff64d93563f65bb273df58b"} Nov 25 10:25:23 crc kubenswrapper[4932]: I1125 10:25:23.353255 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:25:23 crc kubenswrapper[4932]: I1125 10:25:23.353625 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="d8ab2177-5150-40f7-9366-e1425040ce69" containerName="cinder-api-log" containerID="cri-o://a360545592b548d717071408aba0985a6476b7ccbada89c7d8dfadb1746851b7" gracePeriod=30 Nov 25 10:25:23 crc kubenswrapper[4932]: I1125 10:25:23.353926 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="d8ab2177-5150-40f7-9366-e1425040ce69" containerName="cinder-api" containerID="cri-o://918dd49aa6de686c5555052f51529c6d93e4a1ad9eb180e7878347ab6300c76f" gracePeriod=30 Nov 25 10:25:24 crc kubenswrapper[4932]: I1125 10:25:24.281261 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e","Type":"ContainerStarted","Data":"56a29fd788e2bfc24b26ef0bf21c2cb00df74786ea67ec34aec6735dd8f20e0d"} Nov 25 10:25:24 crc kubenswrapper[4932]: I1125 10:25:24.285457 4932 generic.go:334] "Generic (PLEG): container finished" podID="d8ab2177-5150-40f7-9366-e1425040ce69" containerID="a360545592b548d717071408aba0985a6476b7ccbada89c7d8dfadb1746851b7" exitCode=143 Nov 25 10:25:24 crc kubenswrapper[4932]: I1125 10:25:24.285507 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d8ab2177-5150-40f7-9366-e1425040ce69","Type":"ContainerDied","Data":"a360545592b548d717071408aba0985a6476b7ccbada89c7d8dfadb1746851b7"} Nov 25 10:25:25 crc kubenswrapper[4932]: I1125 10:25:25.296374 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e","Type":"ContainerStarted","Data":"8a794e6c2f75e9e58330ebd3b347ea7405ff3fa6226509421baff43cfa43968b"} Nov 25 10:25:25 crc kubenswrapper[4932]: I1125 10:25:25.318472 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.318452564 podStartE2EDuration="4.318452564s" podCreationTimestamp="2025-11-25 10:25:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:25:25.313549124 +0000 UTC m=+5785.439578687" watchObservedRunningTime="2025-11-25 10:25:25.318452564 +0000 UTC m=+5785.444482127" Nov 25 10:25:26 crc kubenswrapper[4932]: I1125 10:25:26.527562 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="d8ab2177-5150-40f7-9366-e1425040ce69" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.1.59:8776/healthcheck\": read tcp 10.217.0.2:33884->10.217.1.59:8776: read: connection reset by peer" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.316755 4932 generic.go:334] "Generic (PLEG): container finished" podID="d8ab2177-5150-40f7-9366-e1425040ce69" containerID="918dd49aa6de686c5555052f51529c6d93e4a1ad9eb180e7878347ab6300c76f" exitCode=0 Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.316832 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d8ab2177-5150-40f7-9366-e1425040ce69","Type":"ContainerDied","Data":"918dd49aa6de686c5555052f51529c6d93e4a1ad9eb180e7878347ab6300c76f"} Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.330948 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.689217 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.822353 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jg7gw\" (UniqueName: \"kubernetes.io/projected/d8ab2177-5150-40f7-9366-e1425040ce69-kube-api-access-jg7gw\") pod \"d8ab2177-5150-40f7-9366-e1425040ce69\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.822414 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d8ab2177-5150-40f7-9366-e1425040ce69-etc-machine-id\") pod \"d8ab2177-5150-40f7-9366-e1425040ce69\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.822496 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-internal-tls-certs\") pod \"d8ab2177-5150-40f7-9366-e1425040ce69\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.822565 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-config-data-custom\") pod \"d8ab2177-5150-40f7-9366-e1425040ce69\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.822598 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-public-tls-certs\") pod \"d8ab2177-5150-40f7-9366-e1425040ce69\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.822618 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-config-data\") pod \"d8ab2177-5150-40f7-9366-e1425040ce69\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.822677 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8ab2177-5150-40f7-9366-e1425040ce69-logs\") pod \"d8ab2177-5150-40f7-9366-e1425040ce69\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.822725 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-scripts\") pod \"d8ab2177-5150-40f7-9366-e1425040ce69\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.822788 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-combined-ca-bundle\") pod \"d8ab2177-5150-40f7-9366-e1425040ce69\" (UID: \"d8ab2177-5150-40f7-9366-e1425040ce69\") " Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.824765 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8ab2177-5150-40f7-9366-e1425040ce69-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d8ab2177-5150-40f7-9366-e1425040ce69" (UID: 
"d8ab2177-5150-40f7-9366-e1425040ce69"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.825027 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8ab2177-5150-40f7-9366-e1425040ce69-logs" (OuterVolumeSpecName: "logs") pod "d8ab2177-5150-40f7-9366-e1425040ce69" (UID: "d8ab2177-5150-40f7-9366-e1425040ce69"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.832438 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-scripts" (OuterVolumeSpecName: "scripts") pod "d8ab2177-5150-40f7-9366-e1425040ce69" (UID: "d8ab2177-5150-40f7-9366-e1425040ce69"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.841887 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8ab2177-5150-40f7-9366-e1425040ce69-kube-api-access-jg7gw" (OuterVolumeSpecName: "kube-api-access-jg7gw") pod "d8ab2177-5150-40f7-9366-e1425040ce69" (UID: "d8ab2177-5150-40f7-9366-e1425040ce69"). InnerVolumeSpecName "kube-api-access-jg7gw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.855802 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d8ab2177-5150-40f7-9366-e1425040ce69" (UID: "d8ab2177-5150-40f7-9366-e1425040ce69"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.865500 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d8ab2177-5150-40f7-9366-e1425040ce69" (UID: "d8ab2177-5150-40f7-9366-e1425040ce69"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.898298 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-config-data" (OuterVolumeSpecName: "config-data") pod "d8ab2177-5150-40f7-9366-e1425040ce69" (UID: "d8ab2177-5150-40f7-9366-e1425040ce69"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.919733 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d8ab2177-5150-40f7-9366-e1425040ce69" (UID: "d8ab2177-5150-40f7-9366-e1425040ce69"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.926939 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.927176 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jg7gw\" (UniqueName: \"kubernetes.io/projected/d8ab2177-5150-40f7-9366-e1425040ce69-kube-api-access-jg7gw\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.927213 4932 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d8ab2177-5150-40f7-9366-e1425040ce69-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.927227 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.927238 4932 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.927249 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.927258 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8ab2177-5150-40f7-9366-e1425040ce69-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.927270 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:27 crc kubenswrapper[4932]: I1125 10:25:27.928699 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d8ab2177-5150-40f7-9366-e1425040ce69" (UID: "d8ab2177-5150-40f7-9366-e1425040ce69"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.029440 4932 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8ab2177-5150-40f7-9366-e1425040ce69-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.326560 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d8ab2177-5150-40f7-9366-e1425040ce69","Type":"ContainerDied","Data":"52e24a93a9a734d9b13851391568299ca7ed07e4491d1e03fbf739edf6ffd719"} Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.326613 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.326634 4932 scope.go:117] "RemoveContainer" containerID="918dd49aa6de686c5555052f51529c6d93e4a1ad9eb180e7878347ab6300c76f" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.356712 4932 scope.go:117] "RemoveContainer" containerID="a360545592b548d717071408aba0985a6476b7ccbada89c7d8dfadb1746851b7" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.362704 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.377666 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.388199 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:25:28 crc kubenswrapper[4932]: E1125 10:25:28.388657 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8ab2177-5150-40f7-9366-e1425040ce69" containerName="cinder-api" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.388679 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8ab2177-5150-40f7-9366-e1425040ce69" containerName="cinder-api" Nov 25 10:25:28 crc kubenswrapper[4932]: E1125 10:25:28.388718 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8ab2177-5150-40f7-9366-e1425040ce69" containerName="cinder-api-log" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.388727 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8ab2177-5150-40f7-9366-e1425040ce69" containerName="cinder-api-log" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.388930 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8ab2177-5150-40f7-9366-e1425040ce69" containerName="cinder-api-log" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.388962 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8ab2177-5150-40f7-9366-e1425040ce69" containerName="cinder-api" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.390129 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.396849 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.397740 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.397896 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.397928 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.538005 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-config-data-custom\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.538067 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/456371be-e15e-41b0-acd7-ed16ff816964-etc-machine-id\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.538121 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.538237 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjdzx\" (UniqueName: \"kubernetes.io/projected/456371be-e15e-41b0-acd7-ed16ff816964-kube-api-access-rjdzx\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.538285 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.538313 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/456371be-e15e-41b0-acd7-ed16ff816964-logs\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.538500 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-public-tls-certs\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.538651 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-scripts\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.538769 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-config-data\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.619170 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8ab2177-5150-40f7-9366-e1425040ce69" path="/var/lib/kubelet/pods/d8ab2177-5150-40f7-9366-e1425040ce69/volumes" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.640355 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-config-data\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.640445 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-config-data-custom\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.640474 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/456371be-e15e-41b0-acd7-ed16ff816964-etc-machine-id\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.640497 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.640537 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjdzx\" (UniqueName: \"kubernetes.io/projected/456371be-e15e-41b0-acd7-ed16ff816964-kube-api-access-rjdzx\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.640574 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.640595 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/456371be-e15e-41b0-acd7-ed16ff816964-logs\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.640605 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/456371be-e15e-41b0-acd7-ed16ff816964-etc-machine-id\") pod 
\"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.640709 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-public-tls-certs\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.640896 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-scripts\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.641334 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/456371be-e15e-41b0-acd7-ed16ff816964-logs\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.645012 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-config-data-custom\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.645033 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-public-tls-certs\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.645164 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.646105 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-config-data\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.646479 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-scripts\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.646646 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/456371be-e15e-41b0-acd7-ed16ff816964-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.664667 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjdzx\" (UniqueName: \"kubernetes.io/projected/456371be-e15e-41b0-acd7-ed16ff816964-kube-api-access-rjdzx\") pod \"cinder-api-0\" (UID: \"456371be-e15e-41b0-acd7-ed16ff816964\") " 
pod="openstack/cinder-api-0" Nov 25 10:25:28 crc kubenswrapper[4932]: I1125 10:25:28.714485 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 10:25:29 crc kubenswrapper[4932]: I1125 10:25:29.132089 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 10:25:29 crc kubenswrapper[4932]: I1125 10:25:29.336209 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"456371be-e15e-41b0-acd7-ed16ff816964","Type":"ContainerStarted","Data":"cca309d2aa7595fcd415f1acaacff088dbb7d6d3237cef42e62697260ea999f8"} Nov 25 10:25:30 crc kubenswrapper[4932]: I1125 10:25:30.349573 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"456371be-e15e-41b0-acd7-ed16ff816964","Type":"ContainerStarted","Data":"d01b09e55e3f753e5725286809574218ab81002c163931e1f8c9446ab9ab64b1"} Nov 25 10:25:31 crc kubenswrapper[4932]: I1125 10:25:31.359225 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"456371be-e15e-41b0-acd7-ed16ff816964","Type":"ContainerStarted","Data":"666944beea3d9c66cf8634b0d57eb74c267ab56949a89f184278c95852b3d9ac"} Nov 25 10:25:31 crc kubenswrapper[4932]: I1125 10:25:31.359813 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 10:25:31 crc kubenswrapper[4932]: I1125 10:25:31.378210 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.378170128 podStartE2EDuration="3.378170128s" podCreationTimestamp="2025-11-25 10:25:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:25:31.375275875 +0000 UTC m=+5791.501305468" watchObservedRunningTime="2025-11-25 10:25:31.378170128 +0000 UTC m=+5791.504199691" Nov 25 10:25:32 crc kubenswrapper[4932]: I1125 10:25:32.560651 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 10:25:32 crc kubenswrapper[4932]: I1125 10:25:32.595544 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:25:33 crc kubenswrapper[4932]: I1125 10:25:33.381830 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" containerName="cinder-scheduler" containerID="cri-o://56a29fd788e2bfc24b26ef0bf21c2cb00df74786ea67ec34aec6735dd8f20e0d" gracePeriod=30 Nov 25 10:25:33 crc kubenswrapper[4932]: I1125 10:25:33.381905 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" containerName="probe" containerID="cri-o://8a794e6c2f75e9e58330ebd3b347ea7405ff3fa6226509421baff43cfa43968b" gracePeriod=30 Nov 25 10:25:33 crc kubenswrapper[4932]: I1125 10:25:33.606268 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:25:33 crc kubenswrapper[4932]: E1125 10:25:33.606605 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:25:34 crc kubenswrapper[4932]: I1125 10:25:34.391631 4932 generic.go:334] "Generic (PLEG): container finished" podID="b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" containerID="8a794e6c2f75e9e58330ebd3b347ea7405ff3fa6226509421baff43cfa43968b" exitCode=0 Nov 25 10:25:34 crc kubenswrapper[4932]: I1125 10:25:34.391687 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e","Type":"ContainerDied","Data":"8a794e6c2f75e9e58330ebd3b347ea7405ff3fa6226509421baff43cfa43968b"} Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.403788 4932 generic.go:334] "Generic (PLEG): container finished" podID="b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" containerID="56a29fd788e2bfc24b26ef0bf21c2cb00df74786ea67ec34aec6735dd8f20e0d" exitCode=0 Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.403825 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e","Type":"ContainerDied","Data":"56a29fd788e2bfc24b26ef0bf21c2cb00df74786ea67ec34aec6735dd8f20e0d"} Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.699026 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.796585 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-combined-ca-bundle\") pod \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.796798 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-scripts\") pod \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.796881 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-config-data\") pod \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.796962 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-etc-machine-id\") pod \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.796994 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-config-data-custom\") pod \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.797040 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xldpn\" (UniqueName: \"kubernetes.io/projected/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-kube-api-access-xldpn\") pod \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\" (UID: \"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e\") " Nov 25 10:25:35 crc 
kubenswrapper[4932]: I1125 10:25:35.798611 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" (UID: "b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.802541 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-scripts" (OuterVolumeSpecName: "scripts") pod "b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" (UID: "b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.802557 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-kube-api-access-xldpn" (OuterVolumeSpecName: "kube-api-access-xldpn") pod "b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" (UID: "b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e"). InnerVolumeSpecName "kube-api-access-xldpn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.802795 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" (UID: "b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.858486 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" (UID: "b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.899753 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.899791 4932 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.899804 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.899815 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xldpn\" (UniqueName: \"kubernetes.io/projected/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-kube-api-access-xldpn\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.899827 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:35 crc kubenswrapper[4932]: I1125 10:25:35.904026 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-config-data" (OuterVolumeSpecName: "config-data") pod "b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" (UID: "b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.002270 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.416553 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e","Type":"ContainerDied","Data":"253721d63db59a9e126367f6114d5915f69291d29ff64d93563f65bb273df58b"} Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.416601 4932 scope.go:117] "RemoveContainer" containerID="8a794e6c2f75e9e58330ebd3b347ea7405ff3fa6226509421baff43cfa43968b" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.416741 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.451459 4932 scope.go:117] "RemoveContainer" containerID="56a29fd788e2bfc24b26ef0bf21c2cb00df74786ea67ec34aec6735dd8f20e0d" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.454335 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.473827 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.483794 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:25:36 crc kubenswrapper[4932]: E1125 10:25:36.484231 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" containerName="probe" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.484257 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" containerName="probe" Nov 25 10:25:36 crc kubenswrapper[4932]: E1125 10:25:36.484285 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" containerName="cinder-scheduler" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.484291 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" containerName="cinder-scheduler" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.484464 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" containerName="cinder-scheduler" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.484485 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" containerName="probe" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.485479 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.488539 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.494685 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.613136 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be3417cc-8482-4a84-aeeb-97fa868057af-scripts\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.613211 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/be3417cc-8482-4a84-aeeb-97fa868057af-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.613570 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be3417cc-8482-4a84-aeeb-97fa868057af-config-data\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.613632 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8x6x\" (UniqueName: \"kubernetes.io/projected/be3417cc-8482-4a84-aeeb-97fa868057af-kube-api-access-w8x6x\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.613726 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/be3417cc-8482-4a84-aeeb-97fa868057af-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.613765 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be3417cc-8482-4a84-aeeb-97fa868057af-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.616075 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e" path="/var/lib/kubelet/pods/b04e8dbb-4102-46b5-9f1a-5fff7ca84f1e/volumes" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.716287 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be3417cc-8482-4a84-aeeb-97fa868057af-scripts\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.716345 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/be3417cc-8482-4a84-aeeb-97fa868057af-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.716407 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be3417cc-8482-4a84-aeeb-97fa868057af-config-data\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.716429 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8x6x\" (UniqueName: \"kubernetes.io/projected/be3417cc-8482-4a84-aeeb-97fa868057af-kube-api-access-w8x6x\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.716493 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/be3417cc-8482-4a84-aeeb-97fa868057af-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.716522 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be3417cc-8482-4a84-aeeb-97fa868057af-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.717653 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/be3417cc-8482-4a84-aeeb-97fa868057af-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.722330 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be3417cc-8482-4a84-aeeb-97fa868057af-scripts\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.726357 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be3417cc-8482-4a84-aeeb-97fa868057af-config-data\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.727279 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/be3417cc-8482-4a84-aeeb-97fa868057af-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.729134 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be3417cc-8482-4a84-aeeb-97fa868057af-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.735603 4932 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8x6x\" (UniqueName: \"kubernetes.io/projected/be3417cc-8482-4a84-aeeb-97fa868057af-kube-api-access-w8x6x\") pod \"cinder-scheduler-0\" (UID: \"be3417cc-8482-4a84-aeeb-97fa868057af\") " pod="openstack/cinder-scheduler-0" Nov 25 10:25:36 crc kubenswrapper[4932]: I1125 10:25:36.811613 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 10:25:37 crc kubenswrapper[4932]: I1125 10:25:37.280149 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 10:25:37 crc kubenswrapper[4932]: I1125 10:25:37.438274 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"be3417cc-8482-4a84-aeeb-97fa868057af","Type":"ContainerStarted","Data":"4018d7c2c92314d1a7c279d7ffb8614c912bf8a8040a287bd1883e57d052116a"} Nov 25 10:25:38 crc kubenswrapper[4932]: I1125 10:25:38.451917 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"be3417cc-8482-4a84-aeeb-97fa868057af","Type":"ContainerStarted","Data":"201910b8fb17b76d5583ea993a427da9fe15aaae10a5a258e7cd81410d418ef7"} Nov 25 10:25:39 crc kubenswrapper[4932]: I1125 10:25:39.461946 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"be3417cc-8482-4a84-aeeb-97fa868057af","Type":"ContainerStarted","Data":"feab358cd60549c029360a617e5479feb23569dc54567b54655c99312516b1cd"} Nov 25 10:25:40 crc kubenswrapper[4932]: I1125 10:25:40.636755 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 10:25:40 crc kubenswrapper[4932]: I1125 10:25:40.656431 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.655517913 podStartE2EDuration="4.655517913s" podCreationTimestamp="2025-11-25 10:25:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:25:39.483462328 +0000 UTC m=+5799.609491911" watchObservedRunningTime="2025-11-25 10:25:40.655517913 +0000 UTC m=+5800.781547476" Nov 25 10:25:41 crc kubenswrapper[4932]: I1125 10:25:41.811867 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 10:25:47 crc kubenswrapper[4932]: I1125 10:25:47.034344 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 25 10:25:47 crc kubenswrapper[4932]: I1125 10:25:47.606319 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:25:47 crc kubenswrapper[4932]: E1125 10:25:47.608006 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.030017 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-t2vdr"] Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.031298 4932 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openstack/glance-db-create-t2vdr" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.044234 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-t2vdr"] Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.113587 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8lck\" (UniqueName: \"kubernetes.io/projected/291edd7f-043c-45bc-9229-98c45b151377-kube-api-access-h8lck\") pod \"glance-db-create-t2vdr\" (UID: \"291edd7f-043c-45bc-9229-98c45b151377\") " pod="openstack/glance-db-create-t2vdr" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.113985 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/291edd7f-043c-45bc-9229-98c45b151377-operator-scripts\") pod \"glance-db-create-t2vdr\" (UID: \"291edd7f-043c-45bc-9229-98c45b151377\") " pod="openstack/glance-db-create-t2vdr" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.138300 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-627c-account-create-jm7w5"] Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.139807 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-627c-account-create-jm7w5" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.141550 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.152630 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-627c-account-create-jm7w5"] Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.215279 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8lck\" (UniqueName: \"kubernetes.io/projected/291edd7f-043c-45bc-9229-98c45b151377-kube-api-access-h8lck\") pod \"glance-db-create-t2vdr\" (UID: \"291edd7f-043c-45bc-9229-98c45b151377\") " pod="openstack/glance-db-create-t2vdr" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.215362 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/291edd7f-043c-45bc-9229-98c45b151377-operator-scripts\") pod \"glance-db-create-t2vdr\" (UID: \"291edd7f-043c-45bc-9229-98c45b151377\") " pod="openstack/glance-db-create-t2vdr" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.215426 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f9473ff-2e53-426d-8c1a-3da28f58404b-operator-scripts\") pod \"glance-627c-account-create-jm7w5\" (UID: \"6f9473ff-2e53-426d-8c1a-3da28f58404b\") " pod="openstack/glance-627c-account-create-jm7w5" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.215477 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xljj\" (UniqueName: \"kubernetes.io/projected/6f9473ff-2e53-426d-8c1a-3da28f58404b-kube-api-access-9xljj\") pod \"glance-627c-account-create-jm7w5\" (UID: \"6f9473ff-2e53-426d-8c1a-3da28f58404b\") " pod="openstack/glance-627c-account-create-jm7w5" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.216172 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/291edd7f-043c-45bc-9229-98c45b151377-operator-scripts\") pod \"glance-db-create-t2vdr\" (UID: \"291edd7f-043c-45bc-9229-98c45b151377\") " pod="openstack/glance-db-create-t2vdr" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.252281 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8lck\" (UniqueName: \"kubernetes.io/projected/291edd7f-043c-45bc-9229-98c45b151377-kube-api-access-h8lck\") pod \"glance-db-create-t2vdr\" (UID: \"291edd7f-043c-45bc-9229-98c45b151377\") " pod="openstack/glance-db-create-t2vdr" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.316731 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f9473ff-2e53-426d-8c1a-3da28f58404b-operator-scripts\") pod \"glance-627c-account-create-jm7w5\" (UID: \"6f9473ff-2e53-426d-8c1a-3da28f58404b\") " pod="openstack/glance-627c-account-create-jm7w5" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.316815 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xljj\" (UniqueName: \"kubernetes.io/projected/6f9473ff-2e53-426d-8c1a-3da28f58404b-kube-api-access-9xljj\") pod \"glance-627c-account-create-jm7w5\" (UID: \"6f9473ff-2e53-426d-8c1a-3da28f58404b\") " pod="openstack/glance-627c-account-create-jm7w5" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.318029 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f9473ff-2e53-426d-8c1a-3da28f58404b-operator-scripts\") pod \"glance-627c-account-create-jm7w5\" (UID: \"6f9473ff-2e53-426d-8c1a-3da28f58404b\") " pod="openstack/glance-627c-account-create-jm7w5" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.334023 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xljj\" (UniqueName: \"kubernetes.io/projected/6f9473ff-2e53-426d-8c1a-3da28f58404b-kube-api-access-9xljj\") pod \"glance-627c-account-create-jm7w5\" (UID: \"6f9473ff-2e53-426d-8c1a-3da28f58404b\") " pod="openstack/glance-627c-account-create-jm7w5" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.351787 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-t2vdr" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.457673 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-627c-account-create-jm7w5" Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.860538 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-t2vdr"] Nov 25 10:25:49 crc kubenswrapper[4932]: W1125 10:25:49.870524 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod291edd7f_043c_45bc_9229_98c45b151377.slice/crio-68600d9dde403189bd828a1722254f95d2fcc345f4a5a0ebf62b2bd6d0352c39 WatchSource:0}: Error finding container 68600d9dde403189bd828a1722254f95d2fcc345f4a5a0ebf62b2bd6d0352c39: Status 404 returned error can't find the container with id 68600d9dde403189bd828a1722254f95d2fcc345f4a5a0ebf62b2bd6d0352c39 Nov 25 10:25:49 crc kubenswrapper[4932]: I1125 10:25:49.986046 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-627c-account-create-jm7w5"] Nov 25 10:25:50 crc kubenswrapper[4932]: I1125 10:25:50.600992 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-t2vdr" event={"ID":"291edd7f-043c-45bc-9229-98c45b151377","Type":"ContainerStarted","Data":"f40f62600dee6687ec7594a169524917865615d86973705a738a022f0b1f0db4"} Nov 25 10:25:50 crc kubenswrapper[4932]: I1125 10:25:50.601038 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-t2vdr" event={"ID":"291edd7f-043c-45bc-9229-98c45b151377","Type":"ContainerStarted","Data":"68600d9dde403189bd828a1722254f95d2fcc345f4a5a0ebf62b2bd6d0352c39"} Nov 25 10:25:50 crc kubenswrapper[4932]: I1125 10:25:50.602385 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-627c-account-create-jm7w5" event={"ID":"6f9473ff-2e53-426d-8c1a-3da28f58404b","Type":"ContainerStarted","Data":"a992f44917128685707ff6b1565745c3e88689fd9814e1868e3da7486cf75af0"} Nov 25 10:25:51 crc kubenswrapper[4932]: I1125 10:25:51.615951 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-627c-account-create-jm7w5" event={"ID":"6f9473ff-2e53-426d-8c1a-3da28f58404b","Type":"ContainerStarted","Data":"196f0667ad5b62d36da5d4e06507c6a14b093f7813c8d0fc6e9ef30a6a958af2"} Nov 25 10:25:51 crc kubenswrapper[4932]: I1125 10:25:51.631641 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-627c-account-create-jm7w5" podStartSLOduration=2.631618038 podStartE2EDuration="2.631618038s" podCreationTimestamp="2025-11-25 10:25:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:25:51.627585003 +0000 UTC m=+5811.753614576" watchObservedRunningTime="2025-11-25 10:25:51.631618038 +0000 UTC m=+5811.757647611" Nov 25 10:25:51 crc kubenswrapper[4932]: I1125 10:25:51.654784 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-t2vdr" podStartSLOduration=2.654755781 podStartE2EDuration="2.654755781s" podCreationTimestamp="2025-11-25 10:25:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:25:51.644020094 +0000 UTC m=+5811.770049657" watchObservedRunningTime="2025-11-25 10:25:51.654755781 +0000 UTC m=+5811.780785354" Nov 25 10:25:52 crc kubenswrapper[4932]: I1125 10:25:52.636725 4932 generic.go:334] "Generic (PLEG): container finished" podID="6f9473ff-2e53-426d-8c1a-3da28f58404b" 
containerID="196f0667ad5b62d36da5d4e06507c6a14b093f7813c8d0fc6e9ef30a6a958af2" exitCode=0 Nov 25 10:25:52 crc kubenswrapper[4932]: I1125 10:25:52.636795 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-627c-account-create-jm7w5" event={"ID":"6f9473ff-2e53-426d-8c1a-3da28f58404b","Type":"ContainerDied","Data":"196f0667ad5b62d36da5d4e06507c6a14b093f7813c8d0fc6e9ef30a6a958af2"} Nov 25 10:25:52 crc kubenswrapper[4932]: I1125 10:25:52.642136 4932 generic.go:334] "Generic (PLEG): container finished" podID="291edd7f-043c-45bc-9229-98c45b151377" containerID="f40f62600dee6687ec7594a169524917865615d86973705a738a022f0b1f0db4" exitCode=0 Nov 25 10:25:52 crc kubenswrapper[4932]: I1125 10:25:52.642200 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-t2vdr" event={"ID":"291edd7f-043c-45bc-9229-98c45b151377","Type":"ContainerDied","Data":"f40f62600dee6687ec7594a169524917865615d86973705a738a022f0b1f0db4"} Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.036488 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-t2vdr" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.043270 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-627c-account-create-jm7w5" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.108058 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8lck\" (UniqueName: \"kubernetes.io/projected/291edd7f-043c-45bc-9229-98c45b151377-kube-api-access-h8lck\") pod \"291edd7f-043c-45bc-9229-98c45b151377\" (UID: \"291edd7f-043c-45bc-9229-98c45b151377\") " Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.108229 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f9473ff-2e53-426d-8c1a-3da28f58404b-operator-scripts\") pod \"6f9473ff-2e53-426d-8c1a-3da28f58404b\" (UID: \"6f9473ff-2e53-426d-8c1a-3da28f58404b\") " Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.108262 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/291edd7f-043c-45bc-9229-98c45b151377-operator-scripts\") pod \"291edd7f-043c-45bc-9229-98c45b151377\" (UID: \"291edd7f-043c-45bc-9229-98c45b151377\") " Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.108313 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xljj\" (UniqueName: \"kubernetes.io/projected/6f9473ff-2e53-426d-8c1a-3da28f58404b-kube-api-access-9xljj\") pod \"6f9473ff-2e53-426d-8c1a-3da28f58404b\" (UID: \"6f9473ff-2e53-426d-8c1a-3da28f58404b\") " Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.108965 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f9473ff-2e53-426d-8c1a-3da28f58404b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6f9473ff-2e53-426d-8c1a-3da28f58404b" (UID: "6f9473ff-2e53-426d-8c1a-3da28f58404b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.109005 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/291edd7f-043c-45bc-9229-98c45b151377-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "291edd7f-043c-45bc-9229-98c45b151377" (UID: "291edd7f-043c-45bc-9229-98c45b151377"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.113237 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f9473ff-2e53-426d-8c1a-3da28f58404b-kube-api-access-9xljj" (OuterVolumeSpecName: "kube-api-access-9xljj") pod "6f9473ff-2e53-426d-8c1a-3da28f58404b" (UID: "6f9473ff-2e53-426d-8c1a-3da28f58404b"). InnerVolumeSpecName "kube-api-access-9xljj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.113304 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/291edd7f-043c-45bc-9229-98c45b151377-kube-api-access-h8lck" (OuterVolumeSpecName: "kube-api-access-h8lck") pod "291edd7f-043c-45bc-9229-98c45b151377" (UID: "291edd7f-043c-45bc-9229-98c45b151377"). InnerVolumeSpecName "kube-api-access-h8lck". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.209987 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6f9473ff-2e53-426d-8c1a-3da28f58404b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.210028 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/291edd7f-043c-45bc-9229-98c45b151377-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.210039 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xljj\" (UniqueName: \"kubernetes.io/projected/6f9473ff-2e53-426d-8c1a-3da28f58404b-kube-api-access-9xljj\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.210049 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8lck\" (UniqueName: \"kubernetes.io/projected/291edd7f-043c-45bc-9229-98c45b151377-kube-api-access-h8lck\") on node \"crc\" DevicePath \"\"" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.663605 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-627c-account-create-jm7w5" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.664721 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-627c-account-create-jm7w5" event={"ID":"6f9473ff-2e53-426d-8c1a-3da28f58404b","Type":"ContainerDied","Data":"a992f44917128685707ff6b1565745c3e88689fd9814e1868e3da7486cf75af0"} Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.664748 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a992f44917128685707ff6b1565745c3e88689fd9814e1868e3da7486cf75af0" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.666406 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-t2vdr" event={"ID":"291edd7f-043c-45bc-9229-98c45b151377","Type":"ContainerDied","Data":"68600d9dde403189bd828a1722254f95d2fcc345f4a5a0ebf62b2bd6d0352c39"} Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.666433 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68600d9dde403189bd828a1722254f95d2fcc345f4a5a0ebf62b2bd6d0352c39" Nov 25 10:25:54 crc kubenswrapper[4932]: I1125 10:25:54.666448 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-t2vdr" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.211330 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-grmcg"] Nov 25 10:25:59 crc kubenswrapper[4932]: E1125 10:25:59.212371 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f9473ff-2e53-426d-8c1a-3da28f58404b" containerName="mariadb-account-create" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.212392 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f9473ff-2e53-426d-8c1a-3da28f58404b" containerName="mariadb-account-create" Nov 25 10:25:59 crc kubenswrapper[4932]: E1125 10:25:59.212415 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="291edd7f-043c-45bc-9229-98c45b151377" containerName="mariadb-database-create" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.212423 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="291edd7f-043c-45bc-9229-98c45b151377" containerName="mariadb-database-create" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.212636 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="291edd7f-043c-45bc-9229-98c45b151377" containerName="mariadb-database-create" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.212682 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f9473ff-2e53-426d-8c1a-3da28f58404b" containerName="mariadb-account-create" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.213367 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.217150 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.217552 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-wzvrk" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.221169 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-grmcg"] Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.296304 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brj8n\" (UniqueName: \"kubernetes.io/projected/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-kube-api-access-brj8n\") pod \"glance-db-sync-grmcg\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.296416 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-combined-ca-bundle\") pod \"glance-db-sync-grmcg\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.296452 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-db-sync-config-data\") pod \"glance-db-sync-grmcg\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.296520 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-config-data\") pod \"glance-db-sync-grmcg\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.399094 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-combined-ca-bundle\") pod \"glance-db-sync-grmcg\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.399514 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-db-sync-config-data\") pod \"glance-db-sync-grmcg\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.399697 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-config-data\") pod \"glance-db-sync-grmcg\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.400738 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brj8n\" (UniqueName: \"kubernetes.io/projected/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-kube-api-access-brj8n\") pod 
\"glance-db-sync-grmcg\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.407564 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-db-sync-config-data\") pod \"glance-db-sync-grmcg\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.407697 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-combined-ca-bundle\") pod \"glance-db-sync-grmcg\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.413179 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-config-data\") pod \"glance-db-sync-grmcg\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.418480 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brj8n\" (UniqueName: \"kubernetes.io/projected/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-kube-api-access-brj8n\") pod \"glance-db-sync-grmcg\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.541424 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-grmcg" Nov 25 10:25:59 crc kubenswrapper[4932]: I1125 10:25:59.606223 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:25:59 crc kubenswrapper[4932]: E1125 10:25:59.606818 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:26:00 crc kubenswrapper[4932]: I1125 10:26:00.124023 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-grmcg"] Nov 25 10:26:00 crc kubenswrapper[4932]: I1125 10:26:00.730738 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-grmcg" event={"ID":"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80","Type":"ContainerStarted","Data":"73e8119678c6fe5aa82570d1cfee3311fc6e844eea2e8ad76def8c7d0b70e049"} Nov 25 10:26:01 crc kubenswrapper[4932]: I1125 10:26:01.739422 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-grmcg" event={"ID":"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80","Type":"ContainerStarted","Data":"247f6ee0835a7a4048979f720478ab7c1fb9af6965c1c9885c79bd30669dc82f"} Nov 25 10:26:01 crc kubenswrapper[4932]: I1125 10:26:01.759774 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-grmcg" podStartSLOduration=2.759757346 podStartE2EDuration="2.759757346s" podCreationTimestamp="2025-11-25 10:25:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:26:01.757557143 +0000 UTC m=+5821.883586726" watchObservedRunningTime="2025-11-25 10:26:01.759757346 +0000 UTC m=+5821.885786909" Nov 25 10:26:04 crc kubenswrapper[4932]: I1125 10:26:04.777104 4932 generic.go:334] "Generic (PLEG): container finished" podID="c0fbeab9-89fc-48ff-87f4-eacc07a5bc80" containerID="247f6ee0835a7a4048979f720478ab7c1fb9af6965c1c9885c79bd30669dc82f" exitCode=0 Nov 25 10:26:04 crc kubenswrapper[4932]: I1125 10:26:04.777254 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-grmcg" event={"ID":"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80","Type":"ContainerDied","Data":"247f6ee0835a7a4048979f720478ab7c1fb9af6965c1c9885c79bd30669dc82f"} Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.174160 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-grmcg" Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.244583 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-db-sync-config-data\") pod \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.244648 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-combined-ca-bundle\") pod \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.244732 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brj8n\" (UniqueName: \"kubernetes.io/projected/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-kube-api-access-brj8n\") pod \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.244773 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-config-data\") pod \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\" (UID: \"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80\") " Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.257480 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c0fbeab9-89fc-48ff-87f4-eacc07a5bc80" (UID: "c0fbeab9-89fc-48ff-87f4-eacc07a5bc80"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.257564 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-kube-api-access-brj8n" (OuterVolumeSpecName: "kube-api-access-brj8n") pod "c0fbeab9-89fc-48ff-87f4-eacc07a5bc80" (UID: "c0fbeab9-89fc-48ff-87f4-eacc07a5bc80"). InnerVolumeSpecName "kube-api-access-brj8n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.270290 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0fbeab9-89fc-48ff-87f4-eacc07a5bc80" (UID: "c0fbeab9-89fc-48ff-87f4-eacc07a5bc80"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.296688 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-config-data" (OuterVolumeSpecName: "config-data") pod "c0fbeab9-89fc-48ff-87f4-eacc07a5bc80" (UID: "c0fbeab9-89fc-48ff-87f4-eacc07a5bc80"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.347600 4932 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.347649 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.347662 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brj8n\" (UniqueName: \"kubernetes.io/projected/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-kube-api-access-brj8n\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.347678 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.835545 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-grmcg" event={"ID":"c0fbeab9-89fc-48ff-87f4-eacc07a5bc80","Type":"ContainerDied","Data":"73e8119678c6fe5aa82570d1cfee3311fc6e844eea2e8ad76def8c7d0b70e049"} Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.835595 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73e8119678c6fe5aa82570d1cfee3311fc6e844eea2e8ad76def8c7d0b70e049" Nov 25 10:26:06 crc kubenswrapper[4932]: I1125 10:26:06.835643 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-grmcg" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.088325 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:26:07 crc kubenswrapper[4932]: E1125 10:26:07.089414 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0fbeab9-89fc-48ff-87f4-eacc07a5bc80" containerName="glance-db-sync" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.089485 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0fbeab9-89fc-48ff-87f4-eacc07a5bc80" containerName="glance-db-sync" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.089727 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0fbeab9-89fc-48ff-87f4-eacc07a5bc80" containerName="glance-db-sync" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.090885 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.092989 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-wzvrk" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.093371 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.093578 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.098368 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.164765 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-scripts\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.164957 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.165121 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/065b3f76-ba41-45fd-94d5-14c8de61424e-logs\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.165179 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/065b3f76-ba41-45fd-94d5-14c8de61424e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.165437 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-config-data\") pod 
\"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.165674 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsfsn\" (UniqueName: \"kubernetes.io/projected/065b3f76-ba41-45fd-94d5-14c8de61424e-kube-api-access-zsfsn\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.267261 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-config-data\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.267801 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsfsn\" (UniqueName: \"kubernetes.io/projected/065b3f76-ba41-45fd-94d5-14c8de61424e-kube-api-access-zsfsn\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.267944 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-scripts\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.268025 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.268132 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/065b3f76-ba41-45fd-94d5-14c8de61424e-logs\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.268241 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/065b3f76-ba41-45fd-94d5-14c8de61424e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.268929 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/065b3f76-ba41-45fd-94d5-14c8de61424e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.274186 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-config-data\") pod \"glance-default-external-api-0\" (UID: 
\"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.278199 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-scripts\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.278452 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/065b3f76-ba41-45fd-94d5-14c8de61424e-logs\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.279935 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.307747 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6768c6b4f9-rj59q"] Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.312166 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsfsn\" (UniqueName: \"kubernetes.io/projected/065b3f76-ba41-45fd-94d5-14c8de61424e-kube-api-access-zsfsn\") pod \"glance-default-external-api-0\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.322268 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.327213 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6768c6b4f9-rj59q"] Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.372907 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-config\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.373002 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjcwd\" (UniqueName: \"kubernetes.io/projected/bba26e41-4fca-4967-b428-7c036676225c-kube-api-access-hjcwd\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.373076 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-dns-svc\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.373248 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-ovsdbserver-nb\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.373328 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-ovsdbserver-sb\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.428856 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.431519 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.433310 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.435895 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.439082 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.474900 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjcwd\" (UniqueName: \"kubernetes.io/projected/bba26e41-4fca-4967-b428-7c036676225c-kube-api-access-hjcwd\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.474961 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-dns-svc\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.475010 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-ovsdbserver-nb\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.475038 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-ovsdbserver-sb\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.475105 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-config\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.476122 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-ovsdbserver-nb\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.476598 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-config\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.476668 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-dns-svc\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.477137 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-ovsdbserver-sb\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.509037 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjcwd\" (UniqueName: \"kubernetes.io/projected/bba26e41-4fca-4967-b428-7c036676225c-kube-api-access-hjcwd\") pod \"dnsmasq-dns-6768c6b4f9-rj59q\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.576970 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.577559 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/65784c15-6139-4fc5-99eb-590b74df68b9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.577604 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.577641 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qh54h\" (UniqueName: \"kubernetes.io/projected/65784c15-6139-4fc5-99eb-590b74df68b9-kube-api-access-qh54h\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.577693 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.577718 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65784c15-6139-4fc5-99eb-590b74df68b9-logs\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.680798 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.680943 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/65784c15-6139-4fc5-99eb-590b74df68b9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.680984 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.681038 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qh54h\" (UniqueName: \"kubernetes.io/projected/65784c15-6139-4fc5-99eb-590b74df68b9-kube-api-access-qh54h\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.681125 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.681146 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65784c15-6139-4fc5-99eb-590b74df68b9-logs\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.681668 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65784c15-6139-4fc5-99eb-590b74df68b9-logs\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.681872 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.685386 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/65784c15-6139-4fc5-99eb-590b74df68b9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.697387 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.720968 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.737030 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.741497 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qh54h\" (UniqueName: \"kubernetes.io/projected/65784c15-6139-4fc5-99eb-590b74df68b9-kube-api-access-qh54h\") pod \"glance-default-internal-api-0\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:07 crc kubenswrapper[4932]: I1125 10:26:07.889012 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:26:08 crc kubenswrapper[4932]: I1125 10:26:08.193081 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:26:08 crc kubenswrapper[4932]: I1125 10:26:08.304809 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6768c6b4f9-rj59q"] Nov 25 10:26:08 crc kubenswrapper[4932]: I1125 10:26:08.590888 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:26:08 crc kubenswrapper[4932]: I1125 10:26:08.634802 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:26:08 crc kubenswrapper[4932]: I1125 10:26:08.879908 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" event={"ID":"bba26e41-4fca-4967-b428-7c036676225c","Type":"ContainerStarted","Data":"d41af701a9c026b9ea4709204c9ca49528f3e9acabeb3b6c6cc6c177f057731e"} Nov 25 10:26:08 crc kubenswrapper[4932]: I1125 10:26:08.880527 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" event={"ID":"bba26e41-4fca-4967-b428-7c036676225c","Type":"ContainerStarted","Data":"f9bb1a49282d92297bef1182b44c7d36c501e4477e9e3c865848bc850cfadeac"} Nov 25 10:26:08 crc kubenswrapper[4932]: I1125 10:26:08.882299 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"65784c15-6139-4fc5-99eb-590b74df68b9","Type":"ContainerStarted","Data":"364a23b7fc08bdc16e294f24804bac4ccd33bc1e96372c49083b684574bd076a"} Nov 25 10:26:08 crc kubenswrapper[4932]: I1125 10:26:08.883391 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"065b3f76-ba41-45fd-94d5-14c8de61424e","Type":"ContainerStarted","Data":"ecac5888b9ecfd1c77df2beb59e605850ed2ffc9852c6e60a22d34e3f39b7551"} Nov 25 10:26:09 crc kubenswrapper[4932]: I1125 10:26:09.767054 4932 scope.go:117] "RemoveContainer" containerID="1e3edaec830eb8ef93ad4f74da08142e55df6c6ca49fb644451029c001bc14fc" Nov 25 10:26:09 crc kubenswrapper[4932]: I1125 10:26:09.815950 4932 scope.go:117] "RemoveContainer" containerID="84a81bfd652accb4de3a7e0f99c56af42bc5527111407e000cfcfe989bb89adb" Nov 25 10:26:09 crc kubenswrapper[4932]: I1125 10:26:09.869153 4932 scope.go:117] "RemoveContainer" containerID="260876b905360bb9f15ab852b3df65659e790b27e81e897e00b3ab107240bc15" Nov 25 10:26:09 crc kubenswrapper[4932]: I1125 10:26:09.894480 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"65784c15-6139-4fc5-99eb-590b74df68b9","Type":"ContainerStarted","Data":"4efd01f06aee76751329ab51fd33f64a31d83b9d516c1ccb7a9a5f5b5b9fa02d"} Nov 25 10:26:09 crc kubenswrapper[4932]: I1125 10:26:09.918430 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"065b3f76-ba41-45fd-94d5-14c8de61424e","Type":"ContainerStarted","Data":"fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9"} Nov 25 10:26:09 crc kubenswrapper[4932]: I1125 10:26:09.918517 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"065b3f76-ba41-45fd-94d5-14c8de61424e","Type":"ContainerStarted","Data":"ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed"} Nov 25 10:26:09 crc kubenswrapper[4932]: I1125 10:26:09.925077 4932 
generic.go:334] "Generic (PLEG): container finished" podID="bba26e41-4fca-4967-b428-7c036676225c" containerID="d41af701a9c026b9ea4709204c9ca49528f3e9acabeb3b6c6cc6c177f057731e" exitCode=0 Nov 25 10:26:09 crc kubenswrapper[4932]: I1125 10:26:09.925184 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" event={"ID":"bba26e41-4fca-4967-b428-7c036676225c","Type":"ContainerDied","Data":"d41af701a9c026b9ea4709204c9ca49528f3e9acabeb3b6c6cc6c177f057731e"} Nov 25 10:26:10 crc kubenswrapper[4932]: I1125 10:26:10.069411 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:26:10 crc kubenswrapper[4932]: I1125 10:26:10.614395 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:26:10 crc kubenswrapper[4932]: E1125 10:26:10.614655 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:26:10 crc kubenswrapper[4932]: I1125 10:26:10.980852 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"65784c15-6139-4fc5-99eb-590b74df68b9","Type":"ContainerStarted","Data":"d859b3e50cbe8eddf412b08c9d9d1d26172103860f280f20f7c7badfaa6db20c"} Nov 25 10:26:10 crc kubenswrapper[4932]: I1125 10:26:10.981388 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="65784c15-6139-4fc5-99eb-590b74df68b9" containerName="glance-log" containerID="cri-o://4efd01f06aee76751329ab51fd33f64a31d83b9d516c1ccb7a9a5f5b5b9fa02d" gracePeriod=30 Nov 25 10:26:10 crc kubenswrapper[4932]: I1125 10:26:10.981817 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="65784c15-6139-4fc5-99eb-590b74df68b9" containerName="glance-httpd" containerID="cri-o://d859b3e50cbe8eddf412b08c9d9d1d26172103860f280f20f7c7badfaa6db20c" gracePeriod=30 Nov 25 10:26:10 crc kubenswrapper[4932]: I1125 10:26:10.983751 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="065b3f76-ba41-45fd-94d5-14c8de61424e" containerName="glance-log" containerID="cri-o://ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed" gracePeriod=30 Nov 25 10:26:10 crc kubenswrapper[4932]: I1125 10:26:10.984508 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" event={"ID":"bba26e41-4fca-4967-b428-7c036676225c","Type":"ContainerStarted","Data":"d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3"} Nov 25 10:26:10 crc kubenswrapper[4932]: I1125 10:26:10.984539 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:10 crc kubenswrapper[4932]: I1125 10:26:10.984584 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="065b3f76-ba41-45fd-94d5-14c8de61424e" containerName="glance-httpd" 
containerID="cri-o://fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9" gracePeriod=30 Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.008375 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.008353989 podStartE2EDuration="4.008353989s" podCreationTimestamp="2025-11-25 10:26:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:26:11.005608541 +0000 UTC m=+5831.131638094" watchObservedRunningTime="2025-11-25 10:26:11.008353989 +0000 UTC m=+5831.134383552" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.057568 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" podStartSLOduration=4.057548659 podStartE2EDuration="4.057548659s" podCreationTimestamp="2025-11-25 10:26:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:26:11.057450496 +0000 UTC m=+5831.183480059" watchObservedRunningTime="2025-11-25 10:26:11.057548659 +0000 UTC m=+5831.183578222" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.059964 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.059956408 podStartE2EDuration="4.059956408s" podCreationTimestamp="2025-11-25 10:26:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:26:11.037765642 +0000 UTC m=+5831.163795205" watchObservedRunningTime="2025-11-25 10:26:11.059956408 +0000 UTC m=+5831.185985971" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.717063 4932 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.829396 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-config-data\") pod \"065b3f76-ba41-45fd-94d5-14c8de61424e\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") "
Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.829679 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-scripts\") pod \"065b3f76-ba41-45fd-94d5-14c8de61424e\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") "
Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.829778 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsfsn\" (UniqueName: \"kubernetes.io/projected/065b3f76-ba41-45fd-94d5-14c8de61424e-kube-api-access-zsfsn\") pod \"065b3f76-ba41-45fd-94d5-14c8de61424e\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") "
Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.829868 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/065b3f76-ba41-45fd-94d5-14c8de61424e-httpd-run\") pod \"065b3f76-ba41-45fd-94d5-14c8de61424e\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") "
Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.829934 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/065b3f76-ba41-45fd-94d5-14c8de61424e-logs\") pod \"065b3f76-ba41-45fd-94d5-14c8de61424e\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") "
Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.830030 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-combined-ca-bundle\") pod \"065b3f76-ba41-45fd-94d5-14c8de61424e\" (UID: \"065b3f76-ba41-45fd-94d5-14c8de61424e\") "
Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.830556 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/065b3f76-ba41-45fd-94d5-14c8de61424e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "065b3f76-ba41-45fd-94d5-14c8de61424e" (UID: "065b3f76-ba41-45fd-94d5-14c8de61424e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.830693 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/065b3f76-ba41-45fd-94d5-14c8de61424e-logs" (OuterVolumeSpecName: "logs") pod "065b3f76-ba41-45fd-94d5-14c8de61424e" (UID: "065b3f76-ba41-45fd-94d5-14c8de61424e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.830843 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/065b3f76-ba41-45fd-94d5-14c8de61424e-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.830889 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/065b3f76-ba41-45fd-94d5-14c8de61424e-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.834963 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-scripts" (OuterVolumeSpecName: "scripts") pod "065b3f76-ba41-45fd-94d5-14c8de61424e" (UID: "065b3f76-ba41-45fd-94d5-14c8de61424e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.835335 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/065b3f76-ba41-45fd-94d5-14c8de61424e-kube-api-access-zsfsn" (OuterVolumeSpecName: "kube-api-access-zsfsn") pod "065b3f76-ba41-45fd-94d5-14c8de61424e" (UID: "065b3f76-ba41-45fd-94d5-14c8de61424e"). InnerVolumeSpecName "kube-api-access-zsfsn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.867967 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "065b3f76-ba41-45fd-94d5-14c8de61424e" (UID: "065b3f76-ba41-45fd-94d5-14c8de61424e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.914962 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-config-data" (OuterVolumeSpecName: "config-data") pod "065b3f76-ba41-45fd-94d5-14c8de61424e" (UID: "065b3f76-ba41-45fd-94d5-14c8de61424e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.933629 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.933668 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zsfsn\" (UniqueName: \"kubernetes.io/projected/065b3f76-ba41-45fd-94d5-14c8de61424e-kube-api-access-zsfsn\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.933683 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.933693 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/065b3f76-ba41-45fd-94d5-14c8de61424e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.993995 4932 generic.go:334] "Generic (PLEG): container finished" podID="65784c15-6139-4fc5-99eb-590b74df68b9" containerID="d859b3e50cbe8eddf412b08c9d9d1d26172103860f280f20f7c7badfaa6db20c" exitCode=0 Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.994043 4932 generic.go:334] "Generic (PLEG): container finished" podID="65784c15-6139-4fc5-99eb-590b74df68b9" containerID="4efd01f06aee76751329ab51fd33f64a31d83b9d516c1ccb7a9a5f5b5b9fa02d" exitCode=143 Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.994095 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"65784c15-6139-4fc5-99eb-590b74df68b9","Type":"ContainerDied","Data":"d859b3e50cbe8eddf412b08c9d9d1d26172103860f280f20f7c7badfaa6db20c"} Nov 25 10:26:11 crc kubenswrapper[4932]: I1125 10:26:11.994126 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"65784c15-6139-4fc5-99eb-590b74df68b9","Type":"ContainerDied","Data":"4efd01f06aee76751329ab51fd33f64a31d83b9d516c1ccb7a9a5f5b5b9fa02d"} Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:11.999991 4932 generic.go:334] "Generic (PLEG): container finished" podID="065b3f76-ba41-45fd-94d5-14c8de61424e" containerID="fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9" exitCode=0 Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.000021 4932 generic.go:334] "Generic (PLEG): container finished" podID="065b3f76-ba41-45fd-94d5-14c8de61424e" containerID="ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed" exitCode=143 Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.000767 4932 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.001065 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"065b3f76-ba41-45fd-94d5-14c8de61424e","Type":"ContainerDied","Data":"fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9"}
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.001105 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"065b3f76-ba41-45fd-94d5-14c8de61424e","Type":"ContainerDied","Data":"ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed"}
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.001117 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"065b3f76-ba41-45fd-94d5-14c8de61424e","Type":"ContainerDied","Data":"ecac5888b9ecfd1c77df2beb59e605850ed2ffc9852c6e60a22d34e3f39b7551"}
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.001134 4932 scope.go:117] "RemoveContainer" containerID="fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.048405 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.062542 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.068415 4932 scope.go:117] "RemoveContainer" containerID="ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.082176 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 10:26:12 crc kubenswrapper[4932]: E1125 10:26:12.082634 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="065b3f76-ba41-45fd-94d5-14c8de61424e" containerName="glance-log"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.082649 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="065b3f76-ba41-45fd-94d5-14c8de61424e" containerName="glance-log"
Nov 25 10:26:12 crc kubenswrapper[4932]: E1125 10:26:12.082674 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="065b3f76-ba41-45fd-94d5-14c8de61424e" containerName="glance-httpd"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.082680 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="065b3f76-ba41-45fd-94d5-14c8de61424e" containerName="glance-httpd"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.082835 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="065b3f76-ba41-45fd-94d5-14c8de61424e" containerName="glance-httpd"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.082858 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="065b3f76-ba41-45fd-94d5-14c8de61424e" containerName="glance-log"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.084157 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.088089 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.088541 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.102953 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.127421 4932 scope.go:117] "RemoveContainer" containerID="fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9"
Nov 25 10:26:12 crc kubenswrapper[4932]: E1125 10:26:12.127880 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9\": container with ID starting with fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9 not found: ID does not exist" containerID="fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.127924 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9"} err="failed to get container status \"fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9\": rpc error: code = NotFound desc = could not find container \"fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9\": container with ID starting with fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9 not found: ID does not exist"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.127950 4932 scope.go:117] "RemoveContainer" containerID="ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed"
Nov 25 10:26:12 crc kubenswrapper[4932]: E1125 10:26:12.128518 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed\": container with ID starting with ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed not found: ID does not exist" containerID="ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.128548 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed"} err="failed to get container status \"ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed\": rpc error: code = NotFound desc = could not find container \"ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed\": container with ID starting with ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed not found: ID does not exist"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.128612 4932 scope.go:117] "RemoveContainer" containerID="fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.128844 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9"} err="failed to get container status \"fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9\": rpc error: code = NotFound desc = could not find container \"fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9\": container with ID starting with fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9 not found: ID does not exist"
\"fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9\": rpc error: code = NotFound desc = could not find container \"fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9\": container with ID starting with fe81a95536a7d0a0277d5067bf025e07edb34b48e0a6e452302065855bf55ee9 not found: ID does not exist" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.128867 4932 scope.go:117] "RemoveContainer" containerID="ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.129058 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed"} err="failed to get container status \"ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed\": rpc error: code = NotFound desc = could not find container \"ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed\": container with ID starting with ae7eac43b045001daafcfaa3bdcbcc273a8b5e719531299c5193de34051889ed not found: ID does not exist" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.149275 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.242743 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/65784c15-6139-4fc5-99eb-590b74df68b9-logs\") pod \"65784c15-6139-4fc5-99eb-590b74df68b9\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.242842 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/65784c15-6139-4fc5-99eb-590b74df68b9-httpd-run\") pod \"65784c15-6139-4fc5-99eb-590b74df68b9\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.242873 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qh54h\" (UniqueName: \"kubernetes.io/projected/65784c15-6139-4fc5-99eb-590b74df68b9-kube-api-access-qh54h\") pod \"65784c15-6139-4fc5-99eb-590b74df68b9\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.242992 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-scripts\") pod \"65784c15-6139-4fc5-99eb-590b74df68b9\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.243020 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-combined-ca-bundle\") pod \"65784c15-6139-4fc5-99eb-590b74df68b9\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.243061 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-config-data\") pod \"65784c15-6139-4fc5-99eb-590b74df68b9\" (UID: \"65784c15-6139-4fc5-99eb-590b74df68b9\") " Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.243513 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.243536 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65784c15-6139-4fc5-99eb-590b74df68b9-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "65784c15-6139-4fc5-99eb-590b74df68b9" (UID: "65784c15-6139-4fc5-99eb-590b74df68b9"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.243819 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65784c15-6139-4fc5-99eb-590b74df68b9-logs" (OuterVolumeSpecName: "logs") pod "65784c15-6139-4fc5-99eb-590b74df68b9" (UID: "65784c15-6139-4fc5-99eb-590b74df68b9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.243569 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-scripts\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.244102 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-config-data\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.244339 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.244443 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grj8w\" (UniqueName: \"kubernetes.io/projected/f69b7f54-bfa9-45b9-9058-f32978b115aa-kube-api-access-grj8w\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.244482 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f69b7f54-bfa9-45b9-9058-f32978b115aa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.244649 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f69b7f54-bfa9-45b9-9058-f32978b115aa-logs\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: 
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.244851 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/65784c15-6139-4fc5-99eb-590b74df68b9-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.247754 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-scripts" (OuterVolumeSpecName: "scripts") pod "65784c15-6139-4fc5-99eb-590b74df68b9" (UID: "65784c15-6139-4fc5-99eb-590b74df68b9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.249216 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65784c15-6139-4fc5-99eb-590b74df68b9-kube-api-access-qh54h" (OuterVolumeSpecName: "kube-api-access-qh54h") pod "65784c15-6139-4fc5-99eb-590b74df68b9" (UID: "65784c15-6139-4fc5-99eb-590b74df68b9"). InnerVolumeSpecName "kube-api-access-qh54h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.271206 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "65784c15-6139-4fc5-99eb-590b74df68b9" (UID: "65784c15-6139-4fc5-99eb-590b74df68b9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.303999 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-config-data" (OuterVolumeSpecName: "config-data") pod "65784c15-6139-4fc5-99eb-590b74df68b9" (UID: "65784c15-6139-4fc5-99eb-590b74df68b9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.346357 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.346421 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grj8w\" (UniqueName: \"kubernetes.io/projected/f69b7f54-bfa9-45b9-9058-f32978b115aa-kube-api-access-grj8w\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.346450 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f69b7f54-bfa9-45b9-9058-f32978b115aa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.346522 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f69b7f54-bfa9-45b9-9058-f32978b115aa-logs\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.346593 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.346622 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-scripts\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.346675 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-config-data\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.347790 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qh54h\" (UniqueName: \"kubernetes.io/projected/65784c15-6139-4fc5-99eb-590b74df68b9-kube-api-access-qh54h\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.347816 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.347830 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-combined-ca-bundle\") on node \"crc\" DevicePath 
\"\"" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.347834 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f69b7f54-bfa9-45b9-9058-f32978b115aa-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.347843 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65784c15-6139-4fc5-99eb-590b74df68b9-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.351769 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-scripts\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.352222 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.352397 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f69b7f54-bfa9-45b9-9058-f32978b115aa-logs\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.352488 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-config-data\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.352865 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.363414 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grj8w\" (UniqueName: \"kubernetes.io/projected/f69b7f54-bfa9-45b9-9058-f32978b115aa-kube-api-access-grj8w\") pod \"glance-default-external-api-0\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " pod="openstack/glance-default-external-api-0" Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.424852 4932 util.go:30] "No sandbox for pod can be found. 
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.624582 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="065b3f76-ba41-45fd-94d5-14c8de61424e" path="/var/lib/kubelet/pods/065b3f76-ba41-45fd-94d5-14c8de61424e/volumes"
Nov 25 10:26:12 crc kubenswrapper[4932]: I1125 10:26:12.966518 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 10:26:12 crc kubenswrapper[4932]: W1125 10:26:12.972609 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf69b7f54_bfa9_45b9_9058_f32978b115aa.slice/crio-9f5d3c48314bcc7ffafebb2384e3b8c793892bf7dea1e6c418a9ede309f1239e WatchSource:0}: Error finding container 9f5d3c48314bcc7ffafebb2384e3b8c793892bf7dea1e6c418a9ede309f1239e: Status 404 returned error can't find the container with id 9f5d3c48314bcc7ffafebb2384e3b8c793892bf7dea1e6c418a9ede309f1239e
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.013438 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f69b7f54-bfa9-45b9-9058-f32978b115aa","Type":"ContainerStarted","Data":"9f5d3c48314bcc7ffafebb2384e3b8c793892bf7dea1e6c418a9ede309f1239e"}
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.016342 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"65784c15-6139-4fc5-99eb-590b74df68b9","Type":"ContainerDied","Data":"364a23b7fc08bdc16e294f24804bac4ccd33bc1e96372c49083b684574bd076a"}
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.016364 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.016422 4932 scope.go:117] "RemoveContainer" containerID="d859b3e50cbe8eddf412b08c9d9d1d26172103860f280f20f7c7badfaa6db20c"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.046792 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.058229 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.072564 4932 scope.go:117] "RemoveContainer" containerID="4efd01f06aee76751329ab51fd33f64a31d83b9d516c1ccb7a9a5f5b5b9fa02d"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.073874 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 10:26:13 crc kubenswrapper[4932]: E1125 10:26:13.074274 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65784c15-6139-4fc5-99eb-590b74df68b9" containerName="glance-log"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.074289 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="65784c15-6139-4fc5-99eb-590b74df68b9" containerName="glance-log"
Nov 25 10:26:13 crc kubenswrapper[4932]: E1125 10:26:13.074329 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65784c15-6139-4fc5-99eb-590b74df68b9" containerName="glance-httpd"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.074336 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="65784c15-6139-4fc5-99eb-590b74df68b9" containerName="glance-httpd"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.074517 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="65784c15-6139-4fc5-99eb-590b74df68b9" containerName="glance-log"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.074534 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="65784c15-6139-4fc5-99eb-590b74df68b9" containerName="glance-httpd"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.075538 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.081116 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.082251 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.103425 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.171281 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-logs\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.171330 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-config-data\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.171363 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.171441 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.171464 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4lwf\" (UniqueName: \"kubernetes.io/projected/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-kube-api-access-f4lwf\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.171489 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-scripts\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.171567 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.272837 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-logs\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.273163 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-config-data\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.273208 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.273291 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.273315 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4lwf\" (UniqueName: \"kubernetes.io/projected/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-kube-api-access-f4lwf\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.273341 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-scripts\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.273426 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.273681 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-logs\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.273768 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0"
(UniqueName: \"kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.278478 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-scripts\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.279898 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.280108 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-config-data\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.282977 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.292526 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4lwf\" (UniqueName: \"kubernetes.io/projected/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-kube-api-access-f4lwf\") pod \"glance-default-internal-api-0\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.407032 4932 util.go:30] "No sandbox for pod can be found. 
Nov 25 10:26:13 crc kubenswrapper[4932]: I1125 10:26:13.938061 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 10:26:13 crc kubenswrapper[4932]: W1125 10:26:13.940078 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod568f6a1f_9f3c_4cda_9f7d_f844a40b4909.slice/crio-46bcd8b229cd66e9a784c323f770e57c610f768abea9798c94fae3b4061d4ed2 WatchSource:0}: Error finding container 46bcd8b229cd66e9a784c323f770e57c610f768abea9798c94fae3b4061d4ed2: Status 404 returned error can't find the container with id 46bcd8b229cd66e9a784c323f770e57c610f768abea9798c94fae3b4061d4ed2
Nov 25 10:26:14 crc kubenswrapper[4932]: I1125 10:26:14.028271 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f69b7f54-bfa9-45b9-9058-f32978b115aa","Type":"ContainerStarted","Data":"0b68ecf65e763f46d73c10cbcafc18a631c5e413211b52149a8c586b64123046"}
Nov 25 10:26:14 crc kubenswrapper[4932]: I1125 10:26:14.031664 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"568f6a1f-9f3c-4cda-9f7d-f844a40b4909","Type":"ContainerStarted","Data":"46bcd8b229cd66e9a784c323f770e57c610f768abea9798c94fae3b4061d4ed2"}
Nov 25 10:26:14 crc kubenswrapper[4932]: I1125 10:26:14.625003 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65784c15-6139-4fc5-99eb-590b74df68b9" path="/var/lib/kubelet/pods/65784c15-6139-4fc5-99eb-590b74df68b9/volumes"
Nov 25 10:26:15 crc kubenswrapper[4932]: I1125 10:26:15.044403 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f69b7f54-bfa9-45b9-9058-f32978b115aa","Type":"ContainerStarted","Data":"cfcb4d993bb53c8cdf9fbd009c8d7c86a8ff41ee936820a577f8fabcdc2e6601"}
Nov 25 10:26:15 crc kubenswrapper[4932]: I1125 10:26:15.048092 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"568f6a1f-9f3c-4cda-9f7d-f844a40b4909","Type":"ContainerStarted","Data":"8b472d52078147695a16b60985f261b472d51b058778a2a54298189701fb2c6a"}
Nov 25 10:26:15 crc kubenswrapper[4932]: I1125 10:26:15.067839 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.067819865 podStartE2EDuration="3.067819865s" podCreationTimestamp="2025-11-25 10:26:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:26:15.065938201 +0000 UTC m=+5835.191967774" watchObservedRunningTime="2025-11-25 10:26:15.067819865 +0000 UTC m=+5835.193849428"
Nov 25 10:26:16 crc kubenswrapper[4932]: I1125 10:26:16.060991 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"568f6a1f-9f3c-4cda-9f7d-f844a40b4909","Type":"ContainerStarted","Data":"349c60cc55bbe03b4c6543be441fde234a2d3f06f6bf43b4563a1a30f2766417"}
Nov 25 10:26:16 crc kubenswrapper[4932]: I1125 10:26:16.093119 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.093098784 podStartE2EDuration="3.093098784s" podCreationTimestamp="2025-11-25 10:26:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:26:16.082323225 +0000 UTC m=+5836.208352808" watchObservedRunningTime="2025-11-25 10:26:16.093098784 +0000 UTC m=+5836.219128337"
Nov 25 10:26:17 crc kubenswrapper[4932]: I1125 10:26:17.691164 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q"
Nov 25 10:26:17 crc kubenswrapper[4932]: I1125 10:26:17.745486 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c944859d7-d4x4f"]
Nov 25 10:26:17 crc kubenswrapper[4932]: I1125 10:26:17.745699 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" podUID="bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" containerName="dnsmasq-dns" containerID="cri-o://2ed2ab6ea243326a881b722ff99d48088ac86a085409b6cb43f4f7c02dc6fea0" gracePeriod=10
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.109812 4932 generic.go:334] "Generic (PLEG): container finished" podID="bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" containerID="2ed2ab6ea243326a881b722ff99d48088ac86a085409b6cb43f4f7c02dc6fea0" exitCode=0
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.109859 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" event={"ID":"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b","Type":"ContainerDied","Data":"2ed2ab6ea243326a881b722ff99d48088ac86a085409b6cb43f4f7c02dc6fea0"}
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.287240 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c944859d7-d4x4f"
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.375128 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-ovsdbserver-nb\") pod \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") "
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.375254 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ffdpx\" (UniqueName: \"kubernetes.io/projected/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-kube-api-access-ffdpx\") pod \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") "
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.375336 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-dns-svc\") pod \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") "
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.375462 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-config\") pod \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") "
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.375513 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-ovsdbserver-sb\") pod \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\" (UID: \"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b\") "
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.386846 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-kube-api-access-ffdpx" (OuterVolumeSpecName: "kube-api-access-ffdpx") pod "bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" (UID: "bf69fad0-d63c-45d3-b1ce-bb99919f9d9b"). InnerVolumeSpecName "kube-api-access-ffdpx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.430158 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" (UID: "bf69fad0-d63c-45d3-b1ce-bb99919f9d9b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.430422 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" (UID: "bf69fad0-d63c-45d3-b1ce-bb99919f9d9b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.438038 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-config" (OuterVolumeSpecName: "config") pod "bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" (UID: "bf69fad0-d63c-45d3-b1ce-bb99919f9d9b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.439402 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" (UID: "bf69fad0-d63c-45d3-b1ce-bb99919f9d9b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.479730 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.479767 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.479779 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.479789 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ffdpx\" (UniqueName: \"kubernetes.io/projected/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-kube-api-access-ffdpx\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:18 crc kubenswrapper[4932]: I1125 10:26:18.479798 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:19 crc kubenswrapper[4932]: I1125 10:26:19.119428 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c944859d7-d4x4f" event={"ID":"bf69fad0-d63c-45d3-b1ce-bb99919f9d9b","Type":"ContainerDied","Data":"a0fe21c29a68090f0ecb006fbe90b05e8d6e1e59687027c16c592ae7ac55130a"} Nov 25 10:26:19 crc kubenswrapper[4932]: I1125 10:26:19.119502 4932 scope.go:117] "RemoveContainer" containerID="2ed2ab6ea243326a881b722ff99d48088ac86a085409b6cb43f4f7c02dc6fea0" Nov 25 10:26:19 crc kubenswrapper[4932]: I1125 10:26:19.119505 4932 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 10:26:19 crc kubenswrapper[4932]: I1125 10:26:19.143699 4932 scope.go:117] "RemoveContainer" containerID="5d02cff61ad6bfecad1b394b204bb55f6f05e041ab27663c230abac75231275d"
Nov 25 10:26:19 crc kubenswrapper[4932]: I1125 10:26:19.150532 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c944859d7-d4x4f"]
Nov 25 10:26:19 crc kubenswrapper[4932]: I1125 10:26:19.172759 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c944859d7-d4x4f"]
Nov 25 10:26:20 crc kubenswrapper[4932]: I1125 10:26:20.620496 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" path="/var/lib/kubelet/pods/bf69fad0-d63c-45d3-b1ce-bb99919f9d9b/volumes"
Nov 25 10:26:22 crc kubenswrapper[4932]: I1125 10:26:22.425278 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 25 10:26:22 crc kubenswrapper[4932]: I1125 10:26:22.425667 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 25 10:26:22 crc kubenswrapper[4932]: I1125 10:26:22.458822 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 25 10:26:22 crc kubenswrapper[4932]: I1125 10:26:22.468346 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 25 10:26:23 crc kubenswrapper[4932]: I1125 10:26:23.153313 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 10:26:23 crc kubenswrapper[4932]: I1125 10:26:23.153359 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 25 10:26:23 crc kubenswrapper[4932]: I1125 10:26:23.408012 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:23 crc kubenswrapper[4932]: I1125 10:26:23.409595 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:23 crc kubenswrapper[4932]: I1125 10:26:23.444629 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:23 crc kubenswrapper[4932]: I1125 10:26:23.470663 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:24 crc kubenswrapper[4932]: I1125 10:26:24.179331 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:24 crc kubenswrapper[4932]: I1125 10:26:24.179594 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 25 10:26:25 crc kubenswrapper[4932]: I1125 10:26:25.239877 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 10:26:25 crc kubenswrapper[4932]: I1125 10:26:25.239976 4932 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 10:26:25 crc kubenswrapper[4932]: I1125 10:26:25.245431 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 25 10:26:25 crc kubenswrapper[4932]: I1125 10:26:25.605832 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2"
kubenswrapper[4932]: I1125 10:26:25.605832 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:26:25 crc kubenswrapper[4932]: E1125 10:26:25.606352 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:26:26 crc kubenswrapper[4932]: I1125 10:26:26.210263 4932 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:26:26 crc kubenswrapper[4932]: I1125 10:26:26.210317 4932 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:26:26 crc kubenswrapper[4932]: I1125 10:26:26.324341 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 10:26:26 crc kubenswrapper[4932]: I1125 10:26:26.366445 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.602696 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-njdms"] Nov 25 10:26:32 crc kubenswrapper[4932]: E1125 10:26:32.603845 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" containerName="init" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.603864 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" containerName="init" Nov 25 10:26:32 crc kubenswrapper[4932]: E1125 10:26:32.603884 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" containerName="dnsmasq-dns" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.603891 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" containerName="dnsmasq-dns" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.604083 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf69fad0-d63c-45d3-b1ce-bb99919f9d9b" containerName="dnsmasq-dns" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.604863 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-njdms" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.620978 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-njdms"] Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.661072 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83dd0ea2-f060-47a7-822e-5f6a8f605df5-operator-scripts\") pod \"placement-db-create-njdms\" (UID: \"83dd0ea2-f060-47a7-822e-5f6a8f605df5\") " pod="openstack/placement-db-create-njdms" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.661597 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-995xw\" (UniqueName: \"kubernetes.io/projected/83dd0ea2-f060-47a7-822e-5f6a8f605df5-kube-api-access-995xw\") pod \"placement-db-create-njdms\" (UID: \"83dd0ea2-f060-47a7-822e-5f6a8f605df5\") " pod="openstack/placement-db-create-njdms" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.701101 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-f085-account-create-x4kwg"] Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.702177 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f085-account-create-x4kwg" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.706600 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.717242 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f085-account-create-x4kwg"] Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.763325 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83dd0ea2-f060-47a7-822e-5f6a8f605df5-operator-scripts\") pod \"placement-db-create-njdms\" (UID: \"83dd0ea2-f060-47a7-822e-5f6a8f605df5\") " pod="openstack/placement-db-create-njdms" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.763440 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18658aa6-a9b1-4277-80ba-304ac1a91ab0-operator-scripts\") pod \"placement-f085-account-create-x4kwg\" (UID: \"18658aa6-a9b1-4277-80ba-304ac1a91ab0\") " pod="openstack/placement-f085-account-create-x4kwg" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.763510 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56b85\" (UniqueName: \"kubernetes.io/projected/18658aa6-a9b1-4277-80ba-304ac1a91ab0-kube-api-access-56b85\") pod \"placement-f085-account-create-x4kwg\" (UID: \"18658aa6-a9b1-4277-80ba-304ac1a91ab0\") " pod="openstack/placement-f085-account-create-x4kwg" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.763570 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-995xw\" (UniqueName: \"kubernetes.io/projected/83dd0ea2-f060-47a7-822e-5f6a8f605df5-kube-api-access-995xw\") pod \"placement-db-create-njdms\" (UID: \"83dd0ea2-f060-47a7-822e-5f6a8f605df5\") " pod="openstack/placement-db-create-njdms" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.764426 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/83dd0ea2-f060-47a7-822e-5f6a8f605df5-operator-scripts\") pod \"placement-db-create-njdms\" (UID: \"83dd0ea2-f060-47a7-822e-5f6a8f605df5\") " pod="openstack/placement-db-create-njdms" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.788721 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-995xw\" (UniqueName: \"kubernetes.io/projected/83dd0ea2-f060-47a7-822e-5f6a8f605df5-kube-api-access-995xw\") pod \"placement-db-create-njdms\" (UID: \"83dd0ea2-f060-47a7-822e-5f6a8f605df5\") " pod="openstack/placement-db-create-njdms" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.866139 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56b85\" (UniqueName: \"kubernetes.io/projected/18658aa6-a9b1-4277-80ba-304ac1a91ab0-kube-api-access-56b85\") pod \"placement-f085-account-create-x4kwg\" (UID: \"18658aa6-a9b1-4277-80ba-304ac1a91ab0\") " pod="openstack/placement-f085-account-create-x4kwg" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.866835 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18658aa6-a9b1-4277-80ba-304ac1a91ab0-operator-scripts\") pod \"placement-f085-account-create-x4kwg\" (UID: \"18658aa6-a9b1-4277-80ba-304ac1a91ab0\") " pod="openstack/placement-f085-account-create-x4kwg" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.867958 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18658aa6-a9b1-4277-80ba-304ac1a91ab0-operator-scripts\") pod \"placement-f085-account-create-x4kwg\" (UID: \"18658aa6-a9b1-4277-80ba-304ac1a91ab0\") " pod="openstack/placement-f085-account-create-x4kwg" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.885746 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56b85\" (UniqueName: \"kubernetes.io/projected/18658aa6-a9b1-4277-80ba-304ac1a91ab0-kube-api-access-56b85\") pod \"placement-f085-account-create-x4kwg\" (UID: \"18658aa6-a9b1-4277-80ba-304ac1a91ab0\") " pod="openstack/placement-f085-account-create-x4kwg" Nov 25 10:26:32 crc kubenswrapper[4932]: I1125 10:26:32.928472 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-njdms" Nov 25 10:26:33 crc kubenswrapper[4932]: I1125 10:26:33.022357 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-f085-account-create-x4kwg" Nov 25 10:26:33 crc kubenswrapper[4932]: I1125 10:26:33.495393 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-njdms"] Nov 25 10:26:33 crc kubenswrapper[4932]: I1125 10:26:33.576891 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f085-account-create-x4kwg"] Nov 25 10:26:33 crc kubenswrapper[4932]: W1125 10:26:33.582622 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18658aa6_a9b1_4277_80ba_304ac1a91ab0.slice/crio-c6df11fb225dad353defcd65e0004dbabb8afd03d32110792b5887e82d2abd08 WatchSource:0}: Error finding container c6df11fb225dad353defcd65e0004dbabb8afd03d32110792b5887e82d2abd08: Status 404 returned error can't find the container with id c6df11fb225dad353defcd65e0004dbabb8afd03d32110792b5887e82d2abd08 Nov 25 10:26:34 crc kubenswrapper[4932]: I1125 10:26:34.301500 4932 generic.go:334] "Generic (PLEG): container finished" podID="18658aa6-a9b1-4277-80ba-304ac1a91ab0" containerID="fa926cda7411684699e7acc49541bf3b4bb27e2b69ac73b93528fd33617bfe98" exitCode=0 Nov 25 10:26:34 crc kubenswrapper[4932]: I1125 10:26:34.301620 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f085-account-create-x4kwg" event={"ID":"18658aa6-a9b1-4277-80ba-304ac1a91ab0","Type":"ContainerDied","Data":"fa926cda7411684699e7acc49541bf3b4bb27e2b69ac73b93528fd33617bfe98"} Nov 25 10:26:34 crc kubenswrapper[4932]: I1125 10:26:34.301867 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f085-account-create-x4kwg" event={"ID":"18658aa6-a9b1-4277-80ba-304ac1a91ab0","Type":"ContainerStarted","Data":"c6df11fb225dad353defcd65e0004dbabb8afd03d32110792b5887e82d2abd08"} Nov 25 10:26:34 crc kubenswrapper[4932]: I1125 10:26:34.303832 4932 generic.go:334] "Generic (PLEG): container finished" podID="83dd0ea2-f060-47a7-822e-5f6a8f605df5" containerID="6f5808e023f540eeccb7882ca7e1bfc802f49e825ec3765724bdfc77d6470345" exitCode=0 Nov 25 10:26:34 crc kubenswrapper[4932]: I1125 10:26:34.303884 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-njdms" event={"ID":"83dd0ea2-f060-47a7-822e-5f6a8f605df5","Type":"ContainerDied","Data":"6f5808e023f540eeccb7882ca7e1bfc802f49e825ec3765724bdfc77d6470345"} Nov 25 10:26:34 crc kubenswrapper[4932]: I1125 10:26:34.303921 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-njdms" event={"ID":"83dd0ea2-f060-47a7-822e-5f6a8f605df5","Type":"ContainerStarted","Data":"6d7aa15f65e1b44d6732f351f5248ccd46d47db91b5bface13141852b538cc02"} Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.727629 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-njdms" Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.731831 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-f085-account-create-x4kwg" Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.826965 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-995xw\" (UniqueName: \"kubernetes.io/projected/83dd0ea2-f060-47a7-822e-5f6a8f605df5-kube-api-access-995xw\") pod \"83dd0ea2-f060-47a7-822e-5f6a8f605df5\" (UID: \"83dd0ea2-f060-47a7-822e-5f6a8f605df5\") " Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.827428 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56b85\" (UniqueName: \"kubernetes.io/projected/18658aa6-a9b1-4277-80ba-304ac1a91ab0-kube-api-access-56b85\") pod \"18658aa6-a9b1-4277-80ba-304ac1a91ab0\" (UID: \"18658aa6-a9b1-4277-80ba-304ac1a91ab0\") " Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.827548 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83dd0ea2-f060-47a7-822e-5f6a8f605df5-operator-scripts\") pod \"83dd0ea2-f060-47a7-822e-5f6a8f605df5\" (UID: \"83dd0ea2-f060-47a7-822e-5f6a8f605df5\") " Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.827862 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18658aa6-a9b1-4277-80ba-304ac1a91ab0-operator-scripts\") pod \"18658aa6-a9b1-4277-80ba-304ac1a91ab0\" (UID: \"18658aa6-a9b1-4277-80ba-304ac1a91ab0\") " Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.828172 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83dd0ea2-f060-47a7-822e-5f6a8f605df5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "83dd0ea2-f060-47a7-822e-5f6a8f605df5" (UID: "83dd0ea2-f060-47a7-822e-5f6a8f605df5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.828442 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18658aa6-a9b1-4277-80ba-304ac1a91ab0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "18658aa6-a9b1-4277-80ba-304ac1a91ab0" (UID: "18658aa6-a9b1-4277-80ba-304ac1a91ab0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.828575 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/18658aa6-a9b1-4277-80ba-304ac1a91ab0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.828654 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83dd0ea2-f060-47a7-822e-5f6a8f605df5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.832715 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83dd0ea2-f060-47a7-822e-5f6a8f605df5-kube-api-access-995xw" (OuterVolumeSpecName: "kube-api-access-995xw") pod "83dd0ea2-f060-47a7-822e-5f6a8f605df5" (UID: "83dd0ea2-f060-47a7-822e-5f6a8f605df5"). InnerVolumeSpecName "kube-api-access-995xw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.832979 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18658aa6-a9b1-4277-80ba-304ac1a91ab0-kube-api-access-56b85" (OuterVolumeSpecName: "kube-api-access-56b85") pod "18658aa6-a9b1-4277-80ba-304ac1a91ab0" (UID: "18658aa6-a9b1-4277-80ba-304ac1a91ab0"). InnerVolumeSpecName "kube-api-access-56b85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.930638 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-995xw\" (UniqueName: \"kubernetes.io/projected/83dd0ea2-f060-47a7-822e-5f6a8f605df5-kube-api-access-995xw\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:35 crc kubenswrapper[4932]: I1125 10:26:35.930680 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56b85\" (UniqueName: \"kubernetes.io/projected/18658aa6-a9b1-4277-80ba-304ac1a91ab0-kube-api-access-56b85\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:36 crc kubenswrapper[4932]: I1125 10:26:36.324498 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f085-account-create-x4kwg" event={"ID":"18658aa6-a9b1-4277-80ba-304ac1a91ab0","Type":"ContainerDied","Data":"c6df11fb225dad353defcd65e0004dbabb8afd03d32110792b5887e82d2abd08"} Nov 25 10:26:36 crc kubenswrapper[4932]: I1125 10:26:36.324557 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c6df11fb225dad353defcd65e0004dbabb8afd03d32110792b5887e82d2abd08" Nov 25 10:26:36 crc kubenswrapper[4932]: I1125 10:26:36.324513 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f085-account-create-x4kwg" Nov 25 10:26:36 crc kubenswrapper[4932]: I1125 10:26:36.325708 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-njdms" event={"ID":"83dd0ea2-f060-47a7-822e-5f6a8f605df5","Type":"ContainerDied","Data":"6d7aa15f65e1b44d6732f351f5248ccd46d47db91b5bface13141852b538cc02"} Nov 25 10:26:36 crc kubenswrapper[4932]: I1125 10:26:36.325747 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d7aa15f65e1b44d6732f351f5248ccd46d47db91b5bface13141852b538cc02" Nov 25 10:26:36 crc kubenswrapper[4932]: I1125 10:26:36.325799 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-njdms" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.005237 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59666c9775-qp4j2"] Nov 25 10:26:38 crc kubenswrapper[4932]: E1125 10:26:38.006023 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83dd0ea2-f060-47a7-822e-5f6a8f605df5" containerName="mariadb-database-create" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.006041 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="83dd0ea2-f060-47a7-822e-5f6a8f605df5" containerName="mariadb-database-create" Nov 25 10:26:38 crc kubenswrapper[4932]: E1125 10:26:38.006054 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18658aa6-a9b1-4277-80ba-304ac1a91ab0" containerName="mariadb-account-create" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.006061 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="18658aa6-a9b1-4277-80ba-304ac1a91ab0" containerName="mariadb-account-create" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.006309 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="83dd0ea2-f060-47a7-822e-5f6a8f605df5" containerName="mariadb-database-create" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.006325 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="18658aa6-a9b1-4277-80ba-304ac1a91ab0" containerName="mariadb-account-create" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.007554 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.029974 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59666c9775-qp4j2"] Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.070625 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-ovsdbserver-sb\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.070845 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-ovsdbserver-nb\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.070927 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-dns-svc\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.071079 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dm6sg\" (UniqueName: \"kubernetes.io/projected/b63de457-6eac-4a4d-b603-86232313867c-kube-api-access-dm6sg\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.071203 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-config\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.075682 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-q2944"] Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.076965 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.079916 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-74crs" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.080323 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.080941 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.084641 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-q2944"] Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.172670 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/905207e2-07d5-4d64-bd4e-e6459f5e9827-logs\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.172991 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dm6sg\" (UniqueName: \"kubernetes.io/projected/b63de457-6eac-4a4d-b603-86232313867c-kube-api-access-dm6sg\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.173344 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-combined-ca-bundle\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.173556 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-config\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.173671 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-scripts\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.173833 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-ovsdbserver-sb\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: 
\"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.173953 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pq82d\" (UniqueName: \"kubernetes.io/projected/905207e2-07d5-4d64-bd4e-e6459f5e9827-kube-api-access-pq82d\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.174046 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-ovsdbserver-nb\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.174141 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-config-data\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.174262 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-dns-svc\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.175404 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-dns-svc\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.177156 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-ovsdbserver-nb\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.178520 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-config\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.178839 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-ovsdbserver-sb\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.194676 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dm6sg\" (UniqueName: \"kubernetes.io/projected/b63de457-6eac-4a4d-b603-86232313867c-kube-api-access-dm6sg\") pod \"dnsmasq-dns-59666c9775-qp4j2\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 
10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.275823 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/905207e2-07d5-4d64-bd4e-e6459f5e9827-logs\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.276236 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-combined-ca-bundle\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.276288 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-scripts\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.276321 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pq82d\" (UniqueName: \"kubernetes.io/projected/905207e2-07d5-4d64-bd4e-e6459f5e9827-kube-api-access-pq82d\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.276352 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-config-data\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.276378 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/905207e2-07d5-4d64-bd4e-e6459f5e9827-logs\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.279252 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-scripts\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.279936 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-config-data\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.280925 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-combined-ca-bundle\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.294322 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pq82d\" (UniqueName: 
\"kubernetes.io/projected/905207e2-07d5-4d64-bd4e-e6459f5e9827-kube-api-access-pq82d\") pod \"placement-db-sync-q2944\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") " pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.340221 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.400796 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-q2944" Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.610754 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:26:38 crc kubenswrapper[4932]: E1125 10:26:38.610974 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:26:38 crc kubenswrapper[4932]: W1125 10:26:38.943875 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod905207e2_07d5_4d64_bd4e_e6459f5e9827.slice/crio-b600be8c188e6c18aeb0c06134f30caab5bff9762749aa26c4218b039b788066 WatchSource:0}: Error finding container b600be8c188e6c18aeb0c06134f30caab5bff9762749aa26c4218b039b788066: Status 404 returned error can't find the container with id b600be8c188e6c18aeb0c06134f30caab5bff9762749aa26c4218b039b788066 Nov 25 10:26:38 crc kubenswrapper[4932]: I1125 10:26:38.945293 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-q2944"] Nov 25 10:26:39 crc kubenswrapper[4932]: W1125 10:26:39.046876 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb63de457_6eac_4a4d_b603_86232313867c.slice/crio-62a7b51e613d8173b296bfc77f38f7f3a729993bd449101363b5711a86f267f7 WatchSource:0}: Error finding container 62a7b51e613d8173b296bfc77f38f7f3a729993bd449101363b5711a86f267f7: Status 404 returned error can't find the container with id 62a7b51e613d8173b296bfc77f38f7f3a729993bd449101363b5711a86f267f7 Nov 25 10:26:39 crc kubenswrapper[4932]: I1125 10:26:39.047965 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59666c9775-qp4j2"] Nov 25 10:26:39 crc kubenswrapper[4932]: I1125 10:26:39.357502 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-q2944" event={"ID":"905207e2-07d5-4d64-bd4e-e6459f5e9827","Type":"ContainerStarted","Data":"6ade566e33724ea28b86f8bca09d1a9fb9ed2e2ad700d8545ecf2f63326764fc"} Nov 25 10:26:39 crc kubenswrapper[4932]: I1125 10:26:39.357557 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-q2944" event={"ID":"905207e2-07d5-4d64-bd4e-e6459f5e9827","Type":"ContainerStarted","Data":"b600be8c188e6c18aeb0c06134f30caab5bff9762749aa26c4218b039b788066"} Nov 25 10:26:39 crc kubenswrapper[4932]: I1125 10:26:39.360363 4932 generic.go:334] "Generic (PLEG): container finished" podID="b63de457-6eac-4a4d-b603-86232313867c" containerID="6db83b93874e910ef50f7f8d8a20472a8f6847edff1d2bfc094cacad2c28c973" exitCode=0 Nov 25 10:26:39 crc 
Nov 25 10:26:39 crc kubenswrapper[4932]: I1125 10:26:39.360407 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59666c9775-qp4j2" event={"ID":"b63de457-6eac-4a4d-b603-86232313867c","Type":"ContainerDied","Data":"6db83b93874e910ef50f7f8d8a20472a8f6847edff1d2bfc094cacad2c28c973"}
Nov 25 10:26:39 crc kubenswrapper[4932]: I1125 10:26:39.360434 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59666c9775-qp4j2" event={"ID":"b63de457-6eac-4a4d-b603-86232313867c","Type":"ContainerStarted","Data":"62a7b51e613d8173b296bfc77f38f7f3a729993bd449101363b5711a86f267f7"}
Nov 25 10:26:39 crc kubenswrapper[4932]: I1125 10:26:39.380846 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-q2944" podStartSLOduration=1.380825965 podStartE2EDuration="1.380825965s" podCreationTimestamp="2025-11-25 10:26:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:26:39.374473183 +0000 UTC m=+5859.500502756" watchObservedRunningTime="2025-11-25 10:26:39.380825965 +0000 UTC m=+5859.506855528"
Nov 25 10:26:40 crc kubenswrapper[4932]: I1125 10:26:40.373907 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59666c9775-qp4j2" event={"ID":"b63de457-6eac-4a4d-b603-86232313867c","Type":"ContainerStarted","Data":"94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d"}
Nov 25 10:26:40 crc kubenswrapper[4932]: I1125 10:26:40.374153 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59666c9775-qp4j2"
Nov 25 10:26:40 crc kubenswrapper[4932]: I1125 10:26:40.403400 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59666c9775-qp4j2" podStartSLOduration=3.403376576 podStartE2EDuration="3.403376576s" podCreationTimestamp="2025-11-25 10:26:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:26:40.393438152 +0000 UTC m=+5860.519467715" watchObservedRunningTime="2025-11-25 10:26:40.403376576 +0000 UTC m=+5860.529406139"
Nov 25 10:26:42 crc kubenswrapper[4932]: I1125 10:26:42.390994 4932 generic.go:334] "Generic (PLEG): container finished" podID="905207e2-07d5-4d64-bd4e-e6459f5e9827" containerID="6ade566e33724ea28b86f8bca09d1a9fb9ed2e2ad700d8545ecf2f63326764fc" exitCode=0
Nov 25 10:26:42 crc kubenswrapper[4932]: I1125 10:26:42.391060 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-q2944" event={"ID":"905207e2-07d5-4d64-bd4e-e6459f5e9827","Type":"ContainerDied","Data":"6ade566e33724ea28b86f8bca09d1a9fb9ed2e2ad700d8545ecf2f63326764fc"}
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.747978 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-q2944"
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.777788 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/905207e2-07d5-4d64-bd4e-e6459f5e9827-logs\") pod \"905207e2-07d5-4d64-bd4e-e6459f5e9827\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") "
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.778008 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pq82d\" (UniqueName: \"kubernetes.io/projected/905207e2-07d5-4d64-bd4e-e6459f5e9827-kube-api-access-pq82d\") pod \"905207e2-07d5-4d64-bd4e-e6459f5e9827\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") "
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.778051 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-combined-ca-bundle\") pod \"905207e2-07d5-4d64-bd4e-e6459f5e9827\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") "
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.778074 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-config-data\") pod \"905207e2-07d5-4d64-bd4e-e6459f5e9827\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") "
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.778171 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-scripts\") pod \"905207e2-07d5-4d64-bd4e-e6459f5e9827\" (UID: \"905207e2-07d5-4d64-bd4e-e6459f5e9827\") "
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.778926 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/905207e2-07d5-4d64-bd4e-e6459f5e9827-logs" (OuterVolumeSpecName: "logs") pod "905207e2-07d5-4d64-bd4e-e6459f5e9827" (UID: "905207e2-07d5-4d64-bd4e-e6459f5e9827"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.779140 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/905207e2-07d5-4d64-bd4e-e6459f5e9827-logs\") on node \"crc\" DevicePath \"\""
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.783399 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-scripts" (OuterVolumeSpecName: "scripts") pod "905207e2-07d5-4d64-bd4e-e6459f5e9827" (UID: "905207e2-07d5-4d64-bd4e-e6459f5e9827"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.784273 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/905207e2-07d5-4d64-bd4e-e6459f5e9827-kube-api-access-pq82d" (OuterVolumeSpecName: "kube-api-access-pq82d") pod "905207e2-07d5-4d64-bd4e-e6459f5e9827" (UID: "905207e2-07d5-4d64-bd4e-e6459f5e9827"). InnerVolumeSpecName "kube-api-access-pq82d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.804459 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-config-data" (OuterVolumeSpecName: "config-data") pod "905207e2-07d5-4d64-bd4e-e6459f5e9827" (UID: "905207e2-07d5-4d64-bd4e-e6459f5e9827"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.806097 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "905207e2-07d5-4d64-bd4e-e6459f5e9827" (UID: "905207e2-07d5-4d64-bd4e-e6459f5e9827"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.880632 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pq82d\" (UniqueName: \"kubernetes.io/projected/905207e2-07d5-4d64-bd4e-e6459f5e9827-kube-api-access-pq82d\") on node \"crc\" DevicePath \"\""
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.880677 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.880691 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:26:43 crc kubenswrapper[4932]: I1125 10:26:43.880703 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/905207e2-07d5-4d64-bd4e-e6459f5e9827-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.408001 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-q2944" event={"ID":"905207e2-07d5-4d64-bd4e-e6459f5e9827","Type":"ContainerDied","Data":"b600be8c188e6c18aeb0c06134f30caab5bff9762749aa26c4218b039b788066"}
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.408041 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b600be8c188e6c18aeb0c06134f30caab5bff9762749aa26c4218b039b788066"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.408056 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-q2944"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.583664 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7f8bcd5cbd-vdf7k"]
Nov 25 10:26:44 crc kubenswrapper[4932]: E1125 10:26:44.584037 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="905207e2-07d5-4d64-bd4e-e6459f5e9827" containerName="placement-db-sync"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.584053 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="905207e2-07d5-4d64-bd4e-e6459f5e9827" containerName="placement-db-sync"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.584310 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="905207e2-07d5-4d64-bd4e-e6459f5e9827" containerName="placement-db-sync"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.585321 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7f8bcd5cbd-vdf7k"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.588080 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.588410 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-74crs"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.588570 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.588683 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.588764 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.622156 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7f8bcd5cbd-vdf7k"]
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.699366 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-combined-ca-bundle\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.699494 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-internal-tls-certs\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.699526 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-config-data\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k"
Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.699570 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-scripts\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k"
\"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.699649 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-public-tls-certs\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.699693 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fcfa86f6-2217-45c5-a12b-477b1b53682a-logs\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.699722 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbp28\" (UniqueName: \"kubernetes.io/projected/fcfa86f6-2217-45c5-a12b-477b1b53682a-kube-api-access-hbp28\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.801750 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fcfa86f6-2217-45c5-a12b-477b1b53682a-logs\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.801826 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbp28\" (UniqueName: \"kubernetes.io/projected/fcfa86f6-2217-45c5-a12b-477b1b53682a-kube-api-access-hbp28\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.801881 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-combined-ca-bundle\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.801965 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-internal-tls-certs\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.802028 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-config-data\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.802074 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-scripts\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " 
pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.802154 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-public-tls-certs\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.802320 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fcfa86f6-2217-45c5-a12b-477b1b53682a-logs\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.810972 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-internal-tls-certs\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.811405 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-public-tls-certs\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.811428 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-combined-ca-bundle\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.812476 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-scripts\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.822997 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbp28\" (UniqueName: \"kubernetes.io/projected/fcfa86f6-2217-45c5-a12b-477b1b53682a-kube-api-access-hbp28\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.823662 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fcfa86f6-2217-45c5-a12b-477b1b53682a-config-data\") pod \"placement-7f8bcd5cbd-vdf7k\" (UID: \"fcfa86f6-2217-45c5-a12b-477b1b53682a\") " pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:44 crc kubenswrapper[4932]: I1125 10:26:44.912507 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:45 crc kubenswrapper[4932]: I1125 10:26:45.540865 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7f8bcd5cbd-vdf7k"] Nov 25 10:26:46 crc kubenswrapper[4932]: I1125 10:26:46.448973 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7f8bcd5cbd-vdf7k" event={"ID":"fcfa86f6-2217-45c5-a12b-477b1b53682a","Type":"ContainerStarted","Data":"6219c7dd69e4a2d4cc08bfc92d290ea31b0ce5db90f1d2f3c7edf06f52da9e9d"} Nov 25 10:26:46 crc kubenswrapper[4932]: I1125 10:26:46.449552 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7f8bcd5cbd-vdf7k" event={"ID":"fcfa86f6-2217-45c5-a12b-477b1b53682a","Type":"ContainerStarted","Data":"069e1b4c9af5597c5a6cabfa630a975cf2488931b38051fd9ba6a68fcf27f237"} Nov 25 10:26:46 crc kubenswrapper[4932]: I1125 10:26:46.449572 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:46 crc kubenswrapper[4932]: I1125 10:26:46.449583 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7f8bcd5cbd-vdf7k" event={"ID":"fcfa86f6-2217-45c5-a12b-477b1b53682a","Type":"ContainerStarted","Data":"2460cd28843ccd42ba9a5426282948336377a03bb12707d29d87f9ada0c127d9"} Nov 25 10:26:46 crc kubenswrapper[4932]: I1125 10:26:46.475833 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-7f8bcd5cbd-vdf7k" podStartSLOduration=2.475809914 podStartE2EDuration="2.475809914s" podCreationTimestamp="2025-11-25 10:26:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:26:46.468369251 +0000 UTC m=+5866.594398814" watchObservedRunningTime="2025-11-25 10:26:46.475809914 +0000 UTC m=+5866.601839477" Nov 25 10:26:47 crc kubenswrapper[4932]: I1125 10:26:47.456813 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:26:48 crc kubenswrapper[4932]: I1125 10:26:48.342072 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:26:48 crc kubenswrapper[4932]: I1125 10:26:48.406560 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6768c6b4f9-rj59q"] Nov 25 10:26:48 crc kubenswrapper[4932]: I1125 10:26:48.406828 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" podUID="bba26e41-4fca-4967-b428-7c036676225c" containerName="dnsmasq-dns" containerID="cri-o://d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3" gracePeriod=10 Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.403243 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.489853 4932 generic.go:334] "Generic (PLEG): container finished" podID="bba26e41-4fca-4967-b428-7c036676225c" containerID="d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3" exitCode=0 Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.489906 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" event={"ID":"bba26e41-4fca-4967-b428-7c036676225c","Type":"ContainerDied","Data":"d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3"} Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.489945 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.489961 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6768c6b4f9-rj59q" event={"ID":"bba26e41-4fca-4967-b428-7c036676225c","Type":"ContainerDied","Data":"f9bb1a49282d92297bef1182b44c7d36c501e4477e9e3c865848bc850cfadeac"} Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.489986 4932 scope.go:117] "RemoveContainer" containerID="d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.497632 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-config\") pod \"bba26e41-4fca-4967-b428-7c036676225c\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.497699 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-dns-svc\") pod \"bba26e41-4fca-4967-b428-7c036676225c\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.497829 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-ovsdbserver-sb\") pod \"bba26e41-4fca-4967-b428-7c036676225c\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.497965 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjcwd\" (UniqueName: \"kubernetes.io/projected/bba26e41-4fca-4967-b428-7c036676225c-kube-api-access-hjcwd\") pod \"bba26e41-4fca-4967-b428-7c036676225c\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.498010 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-ovsdbserver-nb\") pod \"bba26e41-4fca-4967-b428-7c036676225c\" (UID: \"bba26e41-4fca-4967-b428-7c036676225c\") " Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.514600 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bba26e41-4fca-4967-b428-7c036676225c-kube-api-access-hjcwd" (OuterVolumeSpecName: "kube-api-access-hjcwd") pod "bba26e41-4fca-4967-b428-7c036676225c" (UID: "bba26e41-4fca-4967-b428-7c036676225c"). InnerVolumeSpecName "kube-api-access-hjcwd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.523553 4932 scope.go:117] "RemoveContainer" containerID="d41af701a9c026b9ea4709204c9ca49528f3e9acabeb3b6c6cc6c177f057731e" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.570317 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bba26e41-4fca-4967-b428-7c036676225c" (UID: "bba26e41-4fca-4967-b428-7c036676225c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.570666 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bba26e41-4fca-4967-b428-7c036676225c" (UID: "bba26e41-4fca-4967-b428-7c036676225c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.589263 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-config" (OuterVolumeSpecName: "config") pod "bba26e41-4fca-4967-b428-7c036676225c" (UID: "bba26e41-4fca-4967-b428-7c036676225c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.591507 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bba26e41-4fca-4967-b428-7c036676225c" (UID: "bba26e41-4fca-4967-b428-7c036676225c"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.600209 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjcwd\" (UniqueName: \"kubernetes.io/projected/bba26e41-4fca-4967-b428-7c036676225c-kube-api-access-hjcwd\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.600247 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.600261 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.600273 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.600283 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bba26e41-4fca-4967-b428-7c036676225c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.706037 4932 scope.go:117] "RemoveContainer" containerID="d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3" Nov 25 10:26:49 crc kubenswrapper[4932]: E1125 10:26:49.709172 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3\": container with ID starting with d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3 not found: ID does not exist" containerID="d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.709256 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3"} err="failed to get container status \"d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3\": rpc error: code = NotFound desc = could not find container \"d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3\": container with ID starting with d3f4c6c69347fca446f9ba1a554f85f3e3b460d13900f88ac4d997f7cf0185f3 not found: ID does not exist" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.709289 4932 scope.go:117] "RemoveContainer" containerID="d41af701a9c026b9ea4709204c9ca49528f3e9acabeb3b6c6cc6c177f057731e" Nov 25 10:26:49 crc kubenswrapper[4932]: E1125 10:26:49.713273 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d41af701a9c026b9ea4709204c9ca49528f3e9acabeb3b6c6cc6c177f057731e\": container with ID starting with d41af701a9c026b9ea4709204c9ca49528f3e9acabeb3b6c6cc6c177f057731e not found: ID does not exist" containerID="d41af701a9c026b9ea4709204c9ca49528f3e9acabeb3b6c6cc6c177f057731e" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.713314 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d41af701a9c026b9ea4709204c9ca49528f3e9acabeb3b6c6cc6c177f057731e"} err="failed to get container status 
\"d41af701a9c026b9ea4709204c9ca49528f3e9acabeb3b6c6cc6c177f057731e\": rpc error: code = NotFound desc = could not find container \"d41af701a9c026b9ea4709204c9ca49528f3e9acabeb3b6c6cc6c177f057731e\": container with ID starting with d41af701a9c026b9ea4709204c9ca49528f3e9acabeb3b6c6cc6c177f057731e not found: ID does not exist" Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.824997 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6768c6b4f9-rj59q"] Nov 25 10:26:49 crc kubenswrapper[4932]: I1125 10:26:49.832373 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6768c6b4f9-rj59q"] Nov 25 10:26:50 crc kubenswrapper[4932]: I1125 10:26:50.618547 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bba26e41-4fca-4967-b428-7c036676225c" path="/var/lib/kubelet/pods/bba26e41-4fca-4967-b428-7c036676225c/volumes" Nov 25 10:26:53 crc kubenswrapper[4932]: I1125 10:26:53.605981 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:26:53 crc kubenswrapper[4932]: E1125 10:26:53.606546 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:27:05 crc kubenswrapper[4932]: I1125 10:27:05.606675 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:27:05 crc kubenswrapper[4932]: E1125 10:27:05.607432 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:27:15 crc kubenswrapper[4932]: I1125 10:27:15.993868 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:27:16 crc kubenswrapper[4932]: I1125 10:27:16.005011 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7f8bcd5cbd-vdf7k" Nov 25 10:27:20 crc kubenswrapper[4932]: I1125 10:27:20.605614 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:27:20 crc kubenswrapper[4932]: E1125 10:27:20.606380 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:27:33 crc kubenswrapper[4932]: I1125 10:27:33.606462 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:27:33 crc kubenswrapper[4932]: E1125 10:27:33.607266 4932 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:27:39 crc kubenswrapper[4932]: I1125 10:27:39.914743 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-ljdqm"] Nov 25 10:27:39 crc kubenswrapper[4932]: E1125 10:27:39.915835 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bba26e41-4fca-4967-b428-7c036676225c" containerName="init" Nov 25 10:27:39 crc kubenswrapper[4932]: I1125 10:27:39.915856 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="bba26e41-4fca-4967-b428-7c036676225c" containerName="init" Nov 25 10:27:39 crc kubenswrapper[4932]: E1125 10:27:39.915873 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bba26e41-4fca-4967-b428-7c036676225c" containerName="dnsmasq-dns" Nov 25 10:27:39 crc kubenswrapper[4932]: I1125 10:27:39.915881 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="bba26e41-4fca-4967-b428-7c036676225c" containerName="dnsmasq-dns" Nov 25 10:27:39 crc kubenswrapper[4932]: I1125 10:27:39.916098 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="bba26e41-4fca-4967-b428-7c036676225c" containerName="dnsmasq-dns" Nov 25 10:27:39 crc kubenswrapper[4932]: I1125 10:27:39.916911 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-ljdqm" Nov 25 10:27:39 crc kubenswrapper[4932]: I1125 10:27:39.931800 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-ljdqm"] Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.009741 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-fw5g6"] Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.010977 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-fw5g6" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.020945 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-fw5g6"] Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.089641 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxkdd\" (UniqueName: \"kubernetes.io/projected/c76fa982-897c-4948-8ad7-a111c8399e74-kube-api-access-rxkdd\") pod \"nova-api-db-create-ljdqm\" (UID: \"c76fa982-897c-4948-8ad7-a111c8399e74\") " pod="openstack/nova-api-db-create-ljdqm" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.089826 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c76fa982-897c-4948-8ad7-a111c8399e74-operator-scripts\") pod \"nova-api-db-create-ljdqm\" (UID: \"c76fa982-897c-4948-8ad7-a111c8399e74\") " pod="openstack/nova-api-db-create-ljdqm" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.120062 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-3891-account-create-8qvzt"] Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.121582 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-3891-account-create-8qvzt" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.123390 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.129989 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-db76q"] Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.131385 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-db76q" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.141940 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-3891-account-create-8qvzt"] Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.150346 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-db76q"] Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.191750 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4z4g4\" (UniqueName: \"kubernetes.io/projected/a5418610-a8c5-42e4-a5df-14af19703a42-kube-api-access-4z4g4\") pod \"nova-cell0-db-create-fw5g6\" (UID: \"a5418610-a8c5-42e4-a5df-14af19703a42\") " pod="openstack/nova-cell0-db-create-fw5g6" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.191875 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c76fa982-897c-4948-8ad7-a111c8399e74-operator-scripts\") pod \"nova-api-db-create-ljdqm\" (UID: \"c76fa982-897c-4948-8ad7-a111c8399e74\") " pod="openstack/nova-api-db-create-ljdqm" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.191996 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxkdd\" (UniqueName: \"kubernetes.io/projected/c76fa982-897c-4948-8ad7-a111c8399e74-kube-api-access-rxkdd\") pod \"nova-api-db-create-ljdqm\" (UID: \"c76fa982-897c-4948-8ad7-a111c8399e74\") " pod="openstack/nova-api-db-create-ljdqm" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.192053 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5418610-a8c5-42e4-a5df-14af19703a42-operator-scripts\") pod \"nova-cell0-db-create-fw5g6\" (UID: \"a5418610-a8c5-42e4-a5df-14af19703a42\") " pod="openstack/nova-cell0-db-create-fw5g6" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.192591 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c76fa982-897c-4948-8ad7-a111c8399e74-operator-scripts\") pod \"nova-api-db-create-ljdqm\" (UID: \"c76fa982-897c-4948-8ad7-a111c8399e74\") " pod="openstack/nova-api-db-create-ljdqm" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.217263 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxkdd\" (UniqueName: \"kubernetes.io/projected/c76fa982-897c-4948-8ad7-a111c8399e74-kube-api-access-rxkdd\") pod \"nova-api-db-create-ljdqm\" (UID: \"c76fa982-897c-4948-8ad7-a111c8399e74\") " pod="openstack/nova-api-db-create-ljdqm" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.233619 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-ljdqm" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.305171 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kx9h2\" (UniqueName: \"kubernetes.io/projected/9b647685-9f5e-4dcb-a0a8-4739ecfb6330-kube-api-access-kx9h2\") pod \"nova-api-3891-account-create-8qvzt\" (UID: \"9b647685-9f5e-4dcb-a0a8-4739ecfb6330\") " pod="openstack/nova-api-3891-account-create-8qvzt" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.305636 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5418610-a8c5-42e4-a5df-14af19703a42-operator-scripts\") pod \"nova-cell0-db-create-fw5g6\" (UID: \"a5418610-a8c5-42e4-a5df-14af19703a42\") " pod="openstack/nova-cell0-db-create-fw5g6" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.305721 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhhxk\" (UniqueName: \"kubernetes.io/projected/aeb1135f-f2d3-4259-ae52-39f9df4a5582-kube-api-access-zhhxk\") pod \"nova-cell1-db-create-db76q\" (UID: \"aeb1135f-f2d3-4259-ae52-39f9df4a5582\") " pod="openstack/nova-cell1-db-create-db76q" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.305779 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aeb1135f-f2d3-4259-ae52-39f9df4a5582-operator-scripts\") pod \"nova-cell1-db-create-db76q\" (UID: \"aeb1135f-f2d3-4259-ae52-39f9df4a5582\") " pod="openstack/nova-cell1-db-create-db76q" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.305825 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b647685-9f5e-4dcb-a0a8-4739ecfb6330-operator-scripts\") pod \"nova-api-3891-account-create-8qvzt\" (UID: \"9b647685-9f5e-4dcb-a0a8-4739ecfb6330\") " pod="openstack/nova-api-3891-account-create-8qvzt" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.305861 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4z4g4\" (UniqueName: \"kubernetes.io/projected/a5418610-a8c5-42e4-a5df-14af19703a42-kube-api-access-4z4g4\") pod \"nova-cell0-db-create-fw5g6\" (UID: \"a5418610-a8c5-42e4-a5df-14af19703a42\") " pod="openstack/nova-cell0-db-create-fw5g6" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.306855 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5418610-a8c5-42e4-a5df-14af19703a42-operator-scripts\") pod \"nova-cell0-db-create-fw5g6\" (UID: \"a5418610-a8c5-42e4-a5df-14af19703a42\") " pod="openstack/nova-cell0-db-create-fw5g6" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.333998 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4z4g4\" (UniqueName: \"kubernetes.io/projected/a5418610-a8c5-42e4-a5df-14af19703a42-kube-api-access-4z4g4\") pod \"nova-cell0-db-create-fw5g6\" (UID: \"a5418610-a8c5-42e4-a5df-14af19703a42\") " pod="openstack/nova-cell0-db-create-fw5g6" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.342025 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-c672-account-create-npp8v"] Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.343387 4932 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c672-account-create-npp8v" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.345551 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.359846 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-c672-account-create-npp8v"] Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.418262 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b647685-9f5e-4dcb-a0a8-4739ecfb6330-operator-scripts\") pod \"nova-api-3891-account-create-8qvzt\" (UID: \"9b647685-9f5e-4dcb-a0a8-4739ecfb6330\") " pod="openstack/nova-api-3891-account-create-8qvzt" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.418713 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kx9h2\" (UniqueName: \"kubernetes.io/projected/9b647685-9f5e-4dcb-a0a8-4739ecfb6330-kube-api-access-kx9h2\") pod \"nova-api-3891-account-create-8qvzt\" (UID: \"9b647685-9f5e-4dcb-a0a8-4739ecfb6330\") " pod="openstack/nova-api-3891-account-create-8qvzt" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.418776 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhhxk\" (UniqueName: \"kubernetes.io/projected/aeb1135f-f2d3-4259-ae52-39f9df4a5582-kube-api-access-zhhxk\") pod \"nova-cell1-db-create-db76q\" (UID: \"aeb1135f-f2d3-4259-ae52-39f9df4a5582\") " pod="openstack/nova-cell1-db-create-db76q" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.418822 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aeb1135f-f2d3-4259-ae52-39f9df4a5582-operator-scripts\") pod \"nova-cell1-db-create-db76q\" (UID: \"aeb1135f-f2d3-4259-ae52-39f9df4a5582\") " pod="openstack/nova-cell1-db-create-db76q" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.420116 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aeb1135f-f2d3-4259-ae52-39f9df4a5582-operator-scripts\") pod \"nova-cell1-db-create-db76q\" (UID: \"aeb1135f-f2d3-4259-ae52-39f9df4a5582\") " pod="openstack/nova-cell1-db-create-db76q" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.421024 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b647685-9f5e-4dcb-a0a8-4739ecfb6330-operator-scripts\") pod \"nova-api-3891-account-create-8qvzt\" (UID: \"9b647685-9f5e-4dcb-a0a8-4739ecfb6330\") " pod="openstack/nova-api-3891-account-create-8qvzt" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.436219 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kx9h2\" (UniqueName: \"kubernetes.io/projected/9b647685-9f5e-4dcb-a0a8-4739ecfb6330-kube-api-access-kx9h2\") pod \"nova-api-3891-account-create-8qvzt\" (UID: \"9b647685-9f5e-4dcb-a0a8-4739ecfb6330\") " pod="openstack/nova-api-3891-account-create-8qvzt" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.436732 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhhxk\" (UniqueName: \"kubernetes.io/projected/aeb1135f-f2d3-4259-ae52-39f9df4a5582-kube-api-access-zhhxk\") pod \"nova-cell1-db-create-db76q\" 
(UID: \"aeb1135f-f2d3-4259-ae52-39f9df4a5582\") " pod="openstack/nova-cell1-db-create-db76q" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.442892 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3891-account-create-8qvzt" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.461941 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-db76q" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.518093 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-3179-account-create-fznsx"] Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.519266 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-3179-account-create-fznsx" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.520909 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb900453-5ad2-4591-a371-59be9fe5f5f4-operator-scripts\") pod \"nova-cell0-c672-account-create-npp8v\" (UID: \"eb900453-5ad2-4591-a371-59be9fe5f5f4\") " pod="openstack/nova-cell0-c672-account-create-npp8v" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.520981 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnqqr\" (UniqueName: \"kubernetes.io/projected/eb900453-5ad2-4591-a371-59be9fe5f5f4-kube-api-access-dnqqr\") pod \"nova-cell0-c672-account-create-npp8v\" (UID: \"eb900453-5ad2-4591-a371-59be9fe5f5f4\") " pod="openstack/nova-cell0-c672-account-create-npp8v" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.523380 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.542563 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-3179-account-create-fznsx"] Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.628623 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-fw5g6" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.629093 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8-operator-scripts\") pod \"nova-cell1-3179-account-create-fznsx\" (UID: \"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8\") " pod="openstack/nova-cell1-3179-account-create-fznsx" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.629157 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjhzf\" (UniqueName: \"kubernetes.io/projected/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8-kube-api-access-gjhzf\") pod \"nova-cell1-3179-account-create-fznsx\" (UID: \"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8\") " pod="openstack/nova-cell1-3179-account-create-fznsx" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.631089 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb900453-5ad2-4591-a371-59be9fe5f5f4-operator-scripts\") pod \"nova-cell0-c672-account-create-npp8v\" (UID: \"eb900453-5ad2-4591-a371-59be9fe5f5f4\") " pod="openstack/nova-cell0-c672-account-create-npp8v" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.631217 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnqqr\" (UniqueName: \"kubernetes.io/projected/eb900453-5ad2-4591-a371-59be9fe5f5f4-kube-api-access-dnqqr\") pod \"nova-cell0-c672-account-create-npp8v\" (UID: \"eb900453-5ad2-4591-a371-59be9fe5f5f4\") " pod="openstack/nova-cell0-c672-account-create-npp8v" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.632127 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb900453-5ad2-4591-a371-59be9fe5f5f4-operator-scripts\") pod \"nova-cell0-c672-account-create-npp8v\" (UID: \"eb900453-5ad2-4591-a371-59be9fe5f5f4\") " pod="openstack/nova-cell0-c672-account-create-npp8v" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.652456 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnqqr\" (UniqueName: \"kubernetes.io/projected/eb900453-5ad2-4591-a371-59be9fe5f5f4-kube-api-access-dnqqr\") pod \"nova-cell0-c672-account-create-npp8v\" (UID: \"eb900453-5ad2-4591-a371-59be9fe5f5f4\") " pod="openstack/nova-cell0-c672-account-create-npp8v" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.717614 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-c672-account-create-npp8v" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.732662 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8-operator-scripts\") pod \"nova-cell1-3179-account-create-fznsx\" (UID: \"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8\") " pod="openstack/nova-cell1-3179-account-create-fznsx" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.732736 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjhzf\" (UniqueName: \"kubernetes.io/projected/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8-kube-api-access-gjhzf\") pod \"nova-cell1-3179-account-create-fznsx\" (UID: \"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8\") " pod="openstack/nova-cell1-3179-account-create-fznsx" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.733768 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8-operator-scripts\") pod \"nova-cell1-3179-account-create-fznsx\" (UID: \"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8\") " pod="openstack/nova-cell1-3179-account-create-fznsx" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.750049 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjhzf\" (UniqueName: \"kubernetes.io/projected/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8-kube-api-access-gjhzf\") pod \"nova-cell1-3179-account-create-fznsx\" (UID: \"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8\") " pod="openstack/nova-cell1-3179-account-create-fznsx" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.761543 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-ljdqm"] Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.844780 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-3179-account-create-fznsx" Nov 25 10:27:40 crc kubenswrapper[4932]: I1125 10:27:40.933339 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ljdqm" event={"ID":"c76fa982-897c-4948-8ad7-a111c8399e74","Type":"ContainerStarted","Data":"ed116b867670386ba2aa4277103478f724d481afac252c408e529775c6e6cf0b"} Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.011232 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-db76q"] Nov 25 10:27:41 crc kubenswrapper[4932]: W1125 10:27:41.016545 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaeb1135f_f2d3_4259_ae52_39f9df4a5582.slice/crio-54354d01f248366f90d716a9d6dfd63e679fa0579dab8564393376eae05aa155 WatchSource:0}: Error finding container 54354d01f248366f90d716a9d6dfd63e679fa0579dab8564393376eae05aa155: Status 404 returned error can't find the container with id 54354d01f248366f90d716a9d6dfd63e679fa0579dab8564393376eae05aa155 Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.021541 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-3891-account-create-8qvzt"] Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.199109 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-fw5g6"] Nov 25 10:27:41 crc kubenswrapper[4932]: W1125 10:27:41.261037 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5418610_a8c5_42e4_a5df_14af19703a42.slice/crio-67aceb4960faa2f9a17fe81e86ea2806de9a124304cb094b0189e917f753b8f2 WatchSource:0}: Error finding container 67aceb4960faa2f9a17fe81e86ea2806de9a124304cb094b0189e917f753b8f2: Status 404 returned error can't find the container with id 67aceb4960faa2f9a17fe81e86ea2806de9a124304cb094b0189e917f753b8f2 Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.310318 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-c672-account-create-npp8v"] Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.382432 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-3179-account-create-fznsx"] Nov 25 10:27:41 crc kubenswrapper[4932]: W1125 10:27:41.386815 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd469507_d0a9_4a7d_9a60_2dd43e09c8a8.slice/crio-9536e0d63693287765014412841a16c683dd51e879d9e12f0fcc7162c6004ddb WatchSource:0}: Error finding container 9536e0d63693287765014412841a16c683dd51e879d9e12f0fcc7162c6004ddb: Status 404 returned error can't find the container with id 9536e0d63693287765014412841a16c683dd51e879d9e12f0fcc7162c6004ddb Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.950487 4932 generic.go:334] "Generic (PLEG): container finished" podID="aeb1135f-f2d3-4259-ae52-39f9df4a5582" containerID="68aca14ebe649e4d42b3757d267d9f35de851988f5416df22e6347f907827e7d" exitCode=0 Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.950548 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-db76q" event={"ID":"aeb1135f-f2d3-4259-ae52-39f9df4a5582","Type":"ContainerDied","Data":"68aca14ebe649e4d42b3757d267d9f35de851988f5416df22e6347f907827e7d"} Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.950574 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-db-create-db76q" event={"ID":"aeb1135f-f2d3-4259-ae52-39f9df4a5582","Type":"ContainerStarted","Data":"54354d01f248366f90d716a9d6dfd63e679fa0579dab8564393376eae05aa155"} Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.954769 4932 generic.go:334] "Generic (PLEG): container finished" podID="bd469507-d0a9-4a7d-9a60-2dd43e09c8a8" containerID="beec379a0c6b93f3ab1b3875a8bcb93dd45b3bebd60aed89eda8d346216e081f" exitCode=0 Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.954832 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3179-account-create-fznsx" event={"ID":"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8","Type":"ContainerDied","Data":"beec379a0c6b93f3ab1b3875a8bcb93dd45b3bebd60aed89eda8d346216e081f"} Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.954857 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3179-account-create-fznsx" event={"ID":"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8","Type":"ContainerStarted","Data":"9536e0d63693287765014412841a16c683dd51e879d9e12f0fcc7162c6004ddb"} Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.957229 4932 generic.go:334] "Generic (PLEG): container finished" podID="c76fa982-897c-4948-8ad7-a111c8399e74" containerID="d56bd976dc36285a7b61db82648c2c784c3778d8af93391c304870b55d07e4cc" exitCode=0 Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.957338 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ljdqm" event={"ID":"c76fa982-897c-4948-8ad7-a111c8399e74","Type":"ContainerDied","Data":"d56bd976dc36285a7b61db82648c2c784c3778d8af93391c304870b55d07e4cc"} Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.958861 4932 generic.go:334] "Generic (PLEG): container finished" podID="a5418610-a8c5-42e4-a5df-14af19703a42" containerID="b4cc984fe03c119ea482429bc2da34118c0cd93ee0c8a55b628d2f018656c444" exitCode=0 Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.958921 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-fw5g6" event={"ID":"a5418610-a8c5-42e4-a5df-14af19703a42","Type":"ContainerDied","Data":"b4cc984fe03c119ea482429bc2da34118c0cd93ee0c8a55b628d2f018656c444"} Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.958996 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-fw5g6" event={"ID":"a5418610-a8c5-42e4-a5df-14af19703a42","Type":"ContainerStarted","Data":"67aceb4960faa2f9a17fe81e86ea2806de9a124304cb094b0189e917f753b8f2"} Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.960371 4932 generic.go:334] "Generic (PLEG): container finished" podID="eb900453-5ad2-4591-a371-59be9fe5f5f4" containerID="81e34d344ce88b4eb1ff0fd3bb2c1d3b97b252c19f190b83c20a61e09177d506" exitCode=0 Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.960492 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c672-account-create-npp8v" event={"ID":"eb900453-5ad2-4591-a371-59be9fe5f5f4","Type":"ContainerDied","Data":"81e34d344ce88b4eb1ff0fd3bb2c1d3b97b252c19f190b83c20a61e09177d506"} Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.960552 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c672-account-create-npp8v" event={"ID":"eb900453-5ad2-4591-a371-59be9fe5f5f4","Type":"ContainerStarted","Data":"3ecdd3864dfa508861878b4553cd3e5303aa72e08e5236f35c290a12dcf857a3"} Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.962398 4932 generic.go:334] "Generic (PLEG): container 
finished" podID="9b647685-9f5e-4dcb-a0a8-4739ecfb6330" containerID="4b01ec669e9a950b868d84be0d32b86e3cab6ccbee292c9aa62528c74878ffd5" exitCode=0 Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.962431 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3891-account-create-8qvzt" event={"ID":"9b647685-9f5e-4dcb-a0a8-4739ecfb6330","Type":"ContainerDied","Data":"4b01ec669e9a950b868d84be0d32b86e3cab6ccbee292c9aa62528c74878ffd5"} Nov 25 10:27:41 crc kubenswrapper[4932]: I1125 10:27:41.962470 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3891-account-create-8qvzt" event={"ID":"9b647685-9f5e-4dcb-a0a8-4739ecfb6330","Type":"ContainerStarted","Data":"a38e91d211c74b8ad2eda451c429287a1a643d5f33ce98fde0a0609abb1e0b83"} Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.366907 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-3179-account-create-fznsx" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.497182 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjhzf\" (UniqueName: \"kubernetes.io/projected/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8-kube-api-access-gjhzf\") pod \"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8\" (UID: \"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8\") " Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.497308 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8-operator-scripts\") pod \"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8\" (UID: \"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8\") " Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.498299 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bd469507-d0a9-4a7d-9a60-2dd43e09c8a8" (UID: "bd469507-d0a9-4a7d-9a60-2dd43e09c8a8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.503896 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8-kube-api-access-gjhzf" (OuterVolumeSpecName: "kube-api-access-gjhzf") pod "bd469507-d0a9-4a7d-9a60-2dd43e09c8a8" (UID: "bd469507-d0a9-4a7d-9a60-2dd43e09c8a8"). InnerVolumeSpecName "kube-api-access-gjhzf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.555138 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-fw5g6" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.561602 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3891-account-create-8qvzt" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.579669 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-ljdqm" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.589269 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-c672-account-create-npp8v" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.599331 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjhzf\" (UniqueName: \"kubernetes.io/projected/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8-kube-api-access-gjhzf\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.599370 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.601844 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-db76q" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.700418 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnqqr\" (UniqueName: \"kubernetes.io/projected/eb900453-5ad2-4591-a371-59be9fe5f5f4-kube-api-access-dnqqr\") pod \"eb900453-5ad2-4591-a371-59be9fe5f5f4\" (UID: \"eb900453-5ad2-4591-a371-59be9fe5f5f4\") " Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.700478 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4z4g4\" (UniqueName: \"kubernetes.io/projected/a5418610-a8c5-42e4-a5df-14af19703a42-kube-api-access-4z4g4\") pod \"a5418610-a8c5-42e4-a5df-14af19703a42\" (UID: \"a5418610-a8c5-42e4-a5df-14af19703a42\") " Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.700548 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5418610-a8c5-42e4-a5df-14af19703a42-operator-scripts\") pod \"a5418610-a8c5-42e4-a5df-14af19703a42\" (UID: \"a5418610-a8c5-42e4-a5df-14af19703a42\") " Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.700588 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c76fa982-897c-4948-8ad7-a111c8399e74-operator-scripts\") pod \"c76fa982-897c-4948-8ad7-a111c8399e74\" (UID: \"c76fa982-897c-4948-8ad7-a111c8399e74\") " Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.700990 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b647685-9f5e-4dcb-a0a8-4739ecfb6330-operator-scripts\") pod \"9b647685-9f5e-4dcb-a0a8-4739ecfb6330\" (UID: \"9b647685-9f5e-4dcb-a0a8-4739ecfb6330\") " Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.701054 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb900453-5ad2-4591-a371-59be9fe5f5f4-operator-scripts\") pod \"eb900453-5ad2-4591-a371-59be9fe5f5f4\" (UID: \"eb900453-5ad2-4591-a371-59be9fe5f5f4\") " Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.701170 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c76fa982-897c-4948-8ad7-a111c8399e74-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c76fa982-897c-4948-8ad7-a111c8399e74" (UID: "c76fa982-897c-4948-8ad7-a111c8399e74"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.701218 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kx9h2\" (UniqueName: \"kubernetes.io/projected/9b647685-9f5e-4dcb-a0a8-4739ecfb6330-kube-api-access-kx9h2\") pod \"9b647685-9f5e-4dcb-a0a8-4739ecfb6330\" (UID: \"9b647685-9f5e-4dcb-a0a8-4739ecfb6330\") " Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.701227 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5418610-a8c5-42e4-a5df-14af19703a42-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a5418610-a8c5-42e4-a5df-14af19703a42" (UID: "a5418610-a8c5-42e4-a5df-14af19703a42"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.701261 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxkdd\" (UniqueName: \"kubernetes.io/projected/c76fa982-897c-4948-8ad7-a111c8399e74-kube-api-access-rxkdd\") pod \"c76fa982-897c-4948-8ad7-a111c8399e74\" (UID: \"c76fa982-897c-4948-8ad7-a111c8399e74\") " Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.701450 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b647685-9f5e-4dcb-a0a8-4739ecfb6330-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9b647685-9f5e-4dcb-a0a8-4739ecfb6330" (UID: "9b647685-9f5e-4dcb-a0a8-4739ecfb6330"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.701617 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb900453-5ad2-4591-a371-59be9fe5f5f4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "eb900453-5ad2-4591-a371-59be9fe5f5f4" (UID: "eb900453-5ad2-4591-a371-59be9fe5f5f4"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.702035 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb900453-5ad2-4591-a371-59be9fe5f5f4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.702061 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a5418610-a8c5-42e4-a5df-14af19703a42-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.702072 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c76fa982-897c-4948-8ad7-a111c8399e74-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.702082 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9b647685-9f5e-4dcb-a0a8-4739ecfb6330-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.704112 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb900453-5ad2-4591-a371-59be9fe5f5f4-kube-api-access-dnqqr" (OuterVolumeSpecName: "kube-api-access-dnqqr") pod "eb900453-5ad2-4591-a371-59be9fe5f5f4" (UID: "eb900453-5ad2-4591-a371-59be9fe5f5f4"). InnerVolumeSpecName "kube-api-access-dnqqr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.705688 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5418610-a8c5-42e4-a5df-14af19703a42-kube-api-access-4z4g4" (OuterVolumeSpecName: "kube-api-access-4z4g4") pod "a5418610-a8c5-42e4-a5df-14af19703a42" (UID: "a5418610-a8c5-42e4-a5df-14af19703a42"). InnerVolumeSpecName "kube-api-access-4z4g4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.706914 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b647685-9f5e-4dcb-a0a8-4739ecfb6330-kube-api-access-kx9h2" (OuterVolumeSpecName: "kube-api-access-kx9h2") pod "9b647685-9f5e-4dcb-a0a8-4739ecfb6330" (UID: "9b647685-9f5e-4dcb-a0a8-4739ecfb6330"). InnerVolumeSpecName "kube-api-access-kx9h2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.709182 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c76fa982-897c-4948-8ad7-a111c8399e74-kube-api-access-rxkdd" (OuterVolumeSpecName: "kube-api-access-rxkdd") pod "c76fa982-897c-4948-8ad7-a111c8399e74" (UID: "c76fa982-897c-4948-8ad7-a111c8399e74"). InnerVolumeSpecName "kube-api-access-rxkdd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.802909 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aeb1135f-f2d3-4259-ae52-39f9df4a5582-operator-scripts\") pod \"aeb1135f-f2d3-4259-ae52-39f9df4a5582\" (UID: \"aeb1135f-f2d3-4259-ae52-39f9df4a5582\") " Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.803083 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhhxk\" (UniqueName: \"kubernetes.io/projected/aeb1135f-f2d3-4259-ae52-39f9df4a5582-kube-api-access-zhhxk\") pod \"aeb1135f-f2d3-4259-ae52-39f9df4a5582\" (UID: \"aeb1135f-f2d3-4259-ae52-39f9df4a5582\") " Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.803542 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kx9h2\" (UniqueName: \"kubernetes.io/projected/9b647685-9f5e-4dcb-a0a8-4739ecfb6330-kube-api-access-kx9h2\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.803564 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxkdd\" (UniqueName: \"kubernetes.io/projected/c76fa982-897c-4948-8ad7-a111c8399e74-kube-api-access-rxkdd\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.803575 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnqqr\" (UniqueName: \"kubernetes.io/projected/eb900453-5ad2-4591-a371-59be9fe5f5f4-kube-api-access-dnqqr\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.803585 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4z4g4\" (UniqueName: \"kubernetes.io/projected/a5418610-a8c5-42e4-a5df-14af19703a42-kube-api-access-4z4g4\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.804289 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aeb1135f-f2d3-4259-ae52-39f9df4a5582-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aeb1135f-f2d3-4259-ae52-39f9df4a5582" (UID: "aeb1135f-f2d3-4259-ae52-39f9df4a5582"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.806694 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aeb1135f-f2d3-4259-ae52-39f9df4a5582-kube-api-access-zhhxk" (OuterVolumeSpecName: "kube-api-access-zhhxk") pod "aeb1135f-f2d3-4259-ae52-39f9df4a5582" (UID: "aeb1135f-f2d3-4259-ae52-39f9df4a5582"). InnerVolumeSpecName "kube-api-access-zhhxk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.904947 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aeb1135f-f2d3-4259-ae52-39f9df4a5582-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.904985 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhhxk\" (UniqueName: \"kubernetes.io/projected/aeb1135f-f2d3-4259-ae52-39f9df4a5582-kube-api-access-zhhxk\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.981420 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-fw5g6" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.981421 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-fw5g6" event={"ID":"a5418610-a8c5-42e4-a5df-14af19703a42","Type":"ContainerDied","Data":"67aceb4960faa2f9a17fe81e86ea2806de9a124304cb094b0189e917f753b8f2"} Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.981567 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="67aceb4960faa2f9a17fe81e86ea2806de9a124304cb094b0189e917f753b8f2" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.983121 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c672-account-create-npp8v" event={"ID":"eb900453-5ad2-4591-a371-59be9fe5f5f4","Type":"ContainerDied","Data":"3ecdd3864dfa508861878b4553cd3e5303aa72e08e5236f35c290a12dcf857a3"} Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.983165 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ecdd3864dfa508861878b4553cd3e5303aa72e08e5236f35c290a12dcf857a3" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.983183 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c672-account-create-npp8v" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.984979 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3891-account-create-8qvzt" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.985326 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3891-account-create-8qvzt" event={"ID":"9b647685-9f5e-4dcb-a0a8-4739ecfb6330","Type":"ContainerDied","Data":"a38e91d211c74b8ad2eda451c429287a1a643d5f33ce98fde0a0609abb1e0b83"} Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.985353 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a38e91d211c74b8ad2eda451c429287a1a643d5f33ce98fde0a0609abb1e0b83" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.987950 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-db76q" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.987940 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-db76q" event={"ID":"aeb1135f-f2d3-4259-ae52-39f9df4a5582","Type":"ContainerDied","Data":"54354d01f248366f90d716a9d6dfd63e679fa0579dab8564393376eae05aa155"} Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.988108 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54354d01f248366f90d716a9d6dfd63e679fa0579dab8564393376eae05aa155" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.989915 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-3179-account-create-fznsx" event={"ID":"bd469507-d0a9-4a7d-9a60-2dd43e09c8a8","Type":"ContainerDied","Data":"9536e0d63693287765014412841a16c683dd51e879d9e12f0fcc7162c6004ddb"} Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.989942 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9536e0d63693287765014412841a16c683dd51e879d9e12f0fcc7162c6004ddb" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.989923 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-3179-account-create-fznsx" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.991319 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-ljdqm" event={"ID":"c76fa982-897c-4948-8ad7-a111c8399e74","Type":"ContainerDied","Data":"ed116b867670386ba2aa4277103478f724d481afac252c408e529775c6e6cf0b"} Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.991343 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed116b867670386ba2aa4277103478f724d481afac252c408e529775c6e6cf0b" Nov 25 10:27:43 crc kubenswrapper[4932]: I1125 10:27:43.991490 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-ljdqm" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.605952 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:27:45 crc kubenswrapper[4932]: E1125 10:27:45.606483 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609022 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-4s296"] Nov 25 10:27:45 crc kubenswrapper[4932]: E1125 10:27:45.609508 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c76fa982-897c-4948-8ad7-a111c8399e74" containerName="mariadb-database-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609533 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c76fa982-897c-4948-8ad7-a111c8399e74" containerName="mariadb-database-create" Nov 25 10:27:45 crc kubenswrapper[4932]: E1125 10:27:45.609554 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5418610-a8c5-42e4-a5df-14af19703a42" containerName="mariadb-database-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609565 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5418610-a8c5-42e4-a5df-14af19703a42" containerName="mariadb-database-create" Nov 25 10:27:45 crc kubenswrapper[4932]: E1125 10:27:45.609585 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd469507-d0a9-4a7d-9a60-2dd43e09c8a8" containerName="mariadb-account-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609594 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd469507-d0a9-4a7d-9a60-2dd43e09c8a8" containerName="mariadb-account-create" Nov 25 10:27:45 crc kubenswrapper[4932]: E1125 10:27:45.609612 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b647685-9f5e-4dcb-a0a8-4739ecfb6330" containerName="mariadb-account-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609621 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b647685-9f5e-4dcb-a0a8-4739ecfb6330" containerName="mariadb-account-create" Nov 25 10:27:45 crc kubenswrapper[4932]: E1125 10:27:45.609648 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb900453-5ad2-4591-a371-59be9fe5f5f4" containerName="mariadb-account-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609655 
4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb900453-5ad2-4591-a371-59be9fe5f5f4" containerName="mariadb-account-create" Nov 25 10:27:45 crc kubenswrapper[4932]: E1125 10:27:45.609677 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aeb1135f-f2d3-4259-ae52-39f9df4a5582" containerName="mariadb-database-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609685 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="aeb1135f-f2d3-4259-ae52-39f9df4a5582" containerName="mariadb-database-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609907 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5418610-a8c5-42e4-a5df-14af19703a42" containerName="mariadb-database-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609931 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="aeb1135f-f2d3-4259-ae52-39f9df4a5582" containerName="mariadb-database-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609944 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd469507-d0a9-4a7d-9a60-2dd43e09c8a8" containerName="mariadb-account-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609959 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb900453-5ad2-4591-a371-59be9fe5f5f4" containerName="mariadb-account-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609981 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b647685-9f5e-4dcb-a0a8-4739ecfb6330" containerName="mariadb-account-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.609998 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c76fa982-897c-4948-8ad7-a111c8399e74" containerName="mariadb-database-create" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.610846 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.612848 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-zg8db" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.613534 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.613848 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.618184 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-4s296"] Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.742761 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-scripts\") pod \"nova-cell0-conductor-db-sync-4s296\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.742828 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-4s296\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.742859 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blgk9\" (UniqueName: \"kubernetes.io/projected/54b73df3-3af6-40b2-8116-47f32031ac99-kube-api-access-blgk9\") pod \"nova-cell0-conductor-db-sync-4s296\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.742939 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-config-data\") pod \"nova-cell0-conductor-db-sync-4s296\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.844740 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blgk9\" (UniqueName: \"kubernetes.io/projected/54b73df3-3af6-40b2-8116-47f32031ac99-kube-api-access-blgk9\") pod \"nova-cell0-conductor-db-sync-4s296\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.844862 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-config-data\") pod \"nova-cell0-conductor-db-sync-4s296\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.844967 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-scripts\") pod \"nova-cell0-conductor-db-sync-4s296\" (UID: 
\"54b73df3-3af6-40b2-8116-47f32031ac99\") " pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.845008 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-4s296\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.850672 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-scripts\") pod \"nova-cell0-conductor-db-sync-4s296\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.850867 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-4s296\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.864138 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-config-data\") pod \"nova-cell0-conductor-db-sync-4s296\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.866625 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blgk9\" (UniqueName: \"kubernetes.io/projected/54b73df3-3af6-40b2-8116-47f32031ac99-kube-api-access-blgk9\") pod \"nova-cell0-conductor-db-sync-4s296\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:45 crc kubenswrapper[4932]: I1125 10:27:45.945179 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:46 crc kubenswrapper[4932]: I1125 10:27:46.414337 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-4s296"] Nov 25 10:27:46 crc kubenswrapper[4932]: W1125 10:27:46.422164 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod54b73df3_3af6_40b2_8116_47f32031ac99.slice/crio-a55c3f0b608ad78c838ab1e74f8848f7d22976e8f6052aa8e6610d213e4417fb WatchSource:0}: Error finding container a55c3f0b608ad78c838ab1e74f8848f7d22976e8f6052aa8e6610d213e4417fb: Status 404 returned error can't find the container with id a55c3f0b608ad78c838ab1e74f8848f7d22976e8f6052aa8e6610d213e4417fb Nov 25 10:27:47 crc kubenswrapper[4932]: I1125 10:27:47.028176 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-4s296" event={"ID":"54b73df3-3af6-40b2-8116-47f32031ac99","Type":"ContainerStarted","Data":"47391dcf8393f2c23b2a4b3459c14008e6d3283d2a862c5dedb8cf5da3d7afb1"} Nov 25 10:27:47 crc kubenswrapper[4932]: I1125 10:27:47.028597 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-4s296" event={"ID":"54b73df3-3af6-40b2-8116-47f32031ac99","Type":"ContainerStarted","Data":"a55c3f0b608ad78c838ab1e74f8848f7d22976e8f6052aa8e6610d213e4417fb"} Nov 25 10:27:47 crc kubenswrapper[4932]: I1125 10:27:47.052552 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-4s296" podStartSLOduration=2.052525941 podStartE2EDuration="2.052525941s" podCreationTimestamp="2025-11-25 10:27:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:27:47.042732061 +0000 UTC m=+5927.168761634" watchObservedRunningTime="2025-11-25 10:27:47.052525941 +0000 UTC m=+5927.178555504" Nov 25 10:27:52 crc kubenswrapper[4932]: I1125 10:27:52.078506 4932 generic.go:334] "Generic (PLEG): container finished" podID="54b73df3-3af6-40b2-8116-47f32031ac99" containerID="47391dcf8393f2c23b2a4b3459c14008e6d3283d2a862c5dedb8cf5da3d7afb1" exitCode=0 Nov 25 10:27:52 crc kubenswrapper[4932]: I1125 10:27:52.078615 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-4s296" event={"ID":"54b73df3-3af6-40b2-8116-47f32031ac99","Type":"ContainerDied","Data":"47391dcf8393f2c23b2a4b3459c14008e6d3283d2a862c5dedb8cf5da3d7afb1"} Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.397581 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.597232 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-config-data\") pod \"54b73df3-3af6-40b2-8116-47f32031ac99\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.597506 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-blgk9\" (UniqueName: \"kubernetes.io/projected/54b73df3-3af6-40b2-8116-47f32031ac99-kube-api-access-blgk9\") pod \"54b73df3-3af6-40b2-8116-47f32031ac99\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.597649 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-scripts\") pod \"54b73df3-3af6-40b2-8116-47f32031ac99\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.597716 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-combined-ca-bundle\") pod \"54b73df3-3af6-40b2-8116-47f32031ac99\" (UID: \"54b73df3-3af6-40b2-8116-47f32031ac99\") " Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.603415 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-scripts" (OuterVolumeSpecName: "scripts") pod "54b73df3-3af6-40b2-8116-47f32031ac99" (UID: "54b73df3-3af6-40b2-8116-47f32031ac99"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.604389 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54b73df3-3af6-40b2-8116-47f32031ac99-kube-api-access-blgk9" (OuterVolumeSpecName: "kube-api-access-blgk9") pod "54b73df3-3af6-40b2-8116-47f32031ac99" (UID: "54b73df3-3af6-40b2-8116-47f32031ac99"). InnerVolumeSpecName "kube-api-access-blgk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.628573 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "54b73df3-3af6-40b2-8116-47f32031ac99" (UID: "54b73df3-3af6-40b2-8116-47f32031ac99"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.629661 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-config-data" (OuterVolumeSpecName: "config-data") pod "54b73df3-3af6-40b2-8116-47f32031ac99" (UID: "54b73df3-3af6-40b2-8116-47f32031ac99"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.700038 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.701141 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-blgk9\" (UniqueName: \"kubernetes.io/projected/54b73df3-3af6-40b2-8116-47f32031ac99-kube-api-access-blgk9\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.701233 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:53 crc kubenswrapper[4932]: I1125 10:27:53.701291 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/54b73df3-3af6-40b2-8116-47f32031ac99-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.097102 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-4s296" event={"ID":"54b73df3-3af6-40b2-8116-47f32031ac99","Type":"ContainerDied","Data":"a55c3f0b608ad78c838ab1e74f8848f7d22976e8f6052aa8e6610d213e4417fb"} Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.097156 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-4s296" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.097167 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a55c3f0b608ad78c838ab1e74f8848f7d22976e8f6052aa8e6610d213e4417fb" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.205223 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 10:27:54 crc kubenswrapper[4932]: E1125 10:27:54.206622 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54b73df3-3af6-40b2-8116-47f32031ac99" containerName="nova-cell0-conductor-db-sync" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.206646 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="54b73df3-3af6-40b2-8116-47f32031ac99" containerName="nova-cell0-conductor-db-sync" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.207404 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="54b73df3-3af6-40b2-8116-47f32031ac99" containerName="nova-cell0-conductor-db-sync" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.210938 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.223273 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-zg8db" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.223528 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.224417 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.414076 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hzw9\" (UniqueName: \"kubernetes.io/projected/f1317b6f-567b-44d0-93b1-9acdc75baa75-kube-api-access-2hzw9\") pod \"nova-cell0-conductor-0\" (UID: \"f1317b6f-567b-44d0-93b1-9acdc75baa75\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.414212 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1317b6f-567b-44d0-93b1-9acdc75baa75-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f1317b6f-567b-44d0-93b1-9acdc75baa75\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.414231 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1317b6f-567b-44d0-93b1-9acdc75baa75-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f1317b6f-567b-44d0-93b1-9acdc75baa75\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.516104 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1317b6f-567b-44d0-93b1-9acdc75baa75-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f1317b6f-567b-44d0-93b1-9acdc75baa75\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.516209 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1317b6f-567b-44d0-93b1-9acdc75baa75-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f1317b6f-567b-44d0-93b1-9acdc75baa75\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.516351 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hzw9\" (UniqueName: \"kubernetes.io/projected/f1317b6f-567b-44d0-93b1-9acdc75baa75-kube-api-access-2hzw9\") pod \"nova-cell0-conductor-0\" (UID: \"f1317b6f-567b-44d0-93b1-9acdc75baa75\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.522011 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1317b6f-567b-44d0-93b1-9acdc75baa75-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f1317b6f-567b-44d0-93b1-9acdc75baa75\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.530123 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1317b6f-567b-44d0-93b1-9acdc75baa75-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" 
(UID: \"f1317b6f-567b-44d0-93b1-9acdc75baa75\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.531841 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hzw9\" (UniqueName: \"kubernetes.io/projected/f1317b6f-567b-44d0-93b1-9acdc75baa75-kube-api-access-2hzw9\") pod \"nova-cell0-conductor-0\" (UID: \"f1317b6f-567b-44d0-93b1-9acdc75baa75\") " pod="openstack/nova-cell0-conductor-0" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.544209 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 10:27:54 crc kubenswrapper[4932]: I1125 10:27:54.985963 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 10:27:55 crc kubenswrapper[4932]: I1125 10:27:55.107319 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f1317b6f-567b-44d0-93b1-9acdc75baa75","Type":"ContainerStarted","Data":"05586c12691fe06b3ccf613e2ec798672d18eaa72c003f53f17368078d79df58"} Nov 25 10:27:56 crc kubenswrapper[4932]: I1125 10:27:56.117990 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f1317b6f-567b-44d0-93b1-9acdc75baa75","Type":"ContainerStarted","Data":"a747cb7735edaaf42a688bb100a5c963da955229e05bee01d7953e42668534df"} Nov 25 10:27:56 crc kubenswrapper[4932]: I1125 10:27:56.118715 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 25 10:27:56 crc kubenswrapper[4932]: I1125 10:27:56.139417 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.139394159 podStartE2EDuration="2.139394159s" podCreationTimestamp="2025-11-25 10:27:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:27:56.13561247 +0000 UTC m=+5936.261642043" watchObservedRunningTime="2025-11-25 10:27:56.139394159 +0000 UTC m=+5936.265423722" Nov 25 10:27:57 crc kubenswrapper[4932]: I1125 10:27:57.606339 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:27:57 crc kubenswrapper[4932]: E1125 10:27:57.606989 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:28:04 crc kubenswrapper[4932]: I1125 10:28:04.572375 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 25 10:28:04 crc kubenswrapper[4932]: I1125 10:28:04.992544 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-fjmbp"] Nov 25 10:28:04 crc kubenswrapper[4932]: I1125 10:28:04.993981 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:04 crc kubenswrapper[4932]: I1125 10:28:04.996384 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 25 10:28:04 crc kubenswrapper[4932]: I1125 10:28:04.996592 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.019933 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-fjmbp"] Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.023799 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-scripts\") pod \"nova-cell0-cell-mapping-fjmbp\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.023908 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-fjmbp\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.023942 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-config-data\") pod \"nova-cell0-cell-mapping-fjmbp\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.024061 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl67x\" (UniqueName: \"kubernetes.io/projected/67c688f2-d79f-41e2-82d5-88b15fd52efd-kube-api-access-bl67x\") pod \"nova-cell0-cell-mapping-fjmbp\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.123244 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.125385 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.127221 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fk4l2\" (UniqueName: \"kubernetes.io/projected/fd35a8fd-f978-4863-98a7-9481c7ce10c9-kube-api-access-fk4l2\") pod \"nova-api-0\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.127283 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd35a8fd-f978-4863-98a7-9481c7ce10c9-logs\") pod \"nova-api-0\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.127318 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd35a8fd-f978-4863-98a7-9481c7ce10c9-config-data\") pod \"nova-api-0\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.127368 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl67x\" (UniqueName: \"kubernetes.io/projected/67c688f2-d79f-41e2-82d5-88b15fd52efd-kube-api-access-bl67x\") pod \"nova-cell0-cell-mapping-fjmbp\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.127450 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-scripts\") pod \"nova-cell0-cell-mapping-fjmbp\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.127497 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd35a8fd-f978-4863-98a7-9481c7ce10c9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.127543 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-fjmbp\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.127576 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-config-data\") pod \"nova-cell0-cell-mapping-fjmbp\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.132026 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.139947 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-scripts\") pod \"nova-cell0-cell-mapping-fjmbp\" (UID: 
\"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.141986 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.151975 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-fjmbp\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.162254 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-config-data\") pod \"nova-cell0-cell-mapping-fjmbp\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.193014 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bl67x\" (UniqueName: \"kubernetes.io/projected/67c688f2-d79f-41e2-82d5-88b15fd52efd-kube-api-access-bl67x\") pod \"nova-cell0-cell-mapping-fjmbp\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.230149 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fk4l2\" (UniqueName: \"kubernetes.io/projected/fd35a8fd-f978-4863-98a7-9481c7ce10c9-kube-api-access-fk4l2\") pod \"nova-api-0\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.230568 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd35a8fd-f978-4863-98a7-9481c7ce10c9-logs\") pod \"nova-api-0\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.230614 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd35a8fd-f978-4863-98a7-9481c7ce10c9-config-data\") pod \"nova-api-0\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.230769 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd35a8fd-f978-4863-98a7-9481c7ce10c9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.232133 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd35a8fd-f978-4863-98a7-9481c7ce10c9-logs\") pod \"nova-api-0\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.241615 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd35a8fd-f978-4863-98a7-9481c7ce10c9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.246437 4932 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.248556 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.253651 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.255908 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd35a8fd-f978-4863-98a7-9481c7ce10c9-config-data\") pod \"nova-api-0\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.266362 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.268095 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.282745 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.296048 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fk4l2\" (UniqueName: \"kubernetes.io/projected/fd35a8fd-f978-4863-98a7-9481c7ce10c9-kube-api-access-fk4l2\") pod \"nova-api-0\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.299061 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.310451 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.320219 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.365343 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.391020 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.393067 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.397690 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.440134 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-774d5f4bd7-zrqhp"] Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.455849 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b673601-7051-4474-a475-bb8beb132e9e-logs\") pod \"nova-metadata-0\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.455995 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mlrf\" (UniqueName: \"kubernetes.io/projected/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-kube-api-access-4mlrf\") pod \"nova-cell1-novncproxy-0\" (UID: \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.458246 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.458308 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b673601-7051-4474-a475-bb8beb132e9e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.458370 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2m8g\" (UniqueName: \"kubernetes.io/projected/0b673601-7051-4474-a475-bb8beb132e9e-kube-api-access-p2m8g\") pod \"nova-metadata-0\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.458498 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.458635 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b673601-7051-4474-a475-bb8beb132e9e-config-data\") pod \"nova-metadata-0\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.487347 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.520949 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.547308 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-774d5f4bd7-zrqhp"] Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.560953 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b673601-7051-4474-a475-bb8beb132e9e-config-data\") pod \"nova-metadata-0\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.561025 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b673601-7051-4474-a475-bb8beb132e9e-logs\") pod \"nova-metadata-0\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.561072 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a9aa1a5-880d-442a-bf77-e290e9acbe80-config-data\") pod \"nova-scheduler-0\" (UID: \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.561120 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mlrf\" (UniqueName: \"kubernetes.io/projected/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-kube-api-access-4mlrf\") pod \"nova-cell1-novncproxy-0\" (UID: \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.561145 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a9aa1a5-880d-442a-bf77-e290e9acbe80-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.561219 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jh47v\" (UniqueName: \"kubernetes.io/projected/3a9aa1a5-880d-442a-bf77-e290e9acbe80-kube-api-access-jh47v\") pod \"nova-scheduler-0\" (UID: \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.561275 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.561295 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b673601-7051-4474-a475-bb8beb132e9e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.561327 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-p2m8g\" (UniqueName: \"kubernetes.io/projected/0b673601-7051-4474-a475-bb8beb132e9e-kube-api-access-p2m8g\") pod \"nova-metadata-0\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.561380 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.564500 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b673601-7051-4474-a475-bb8beb132e9e-logs\") pod \"nova-metadata-0\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.566001 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b673601-7051-4474-a475-bb8beb132e9e-config-data\") pod \"nova-metadata-0\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.566950 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.569905 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.570159 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b673601-7051-4474-a475-bb8beb132e9e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.583219 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mlrf\" (UniqueName: \"kubernetes.io/projected/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-kube-api-access-4mlrf\") pod \"nova-cell1-novncproxy-0\" (UID: \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.595210 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2m8g\" (UniqueName: \"kubernetes.io/projected/0b673601-7051-4474-a475-bb8beb132e9e-kube-api-access-p2m8g\") pod \"nova-metadata-0\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.664338 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-config\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc 
kubenswrapper[4932]: I1125 10:28:05.664384 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-ovsdbserver-sb\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.664467 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-ovsdbserver-nb\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.664499 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a9aa1a5-880d-442a-bf77-e290e9acbe80-config-data\") pod \"nova-scheduler-0\" (UID: \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.664521 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-dns-svc\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.664545 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pgd72\" (UniqueName: \"kubernetes.io/projected/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-kube-api-access-pgd72\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.664588 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a9aa1a5-880d-442a-bf77-e290e9acbe80-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.664635 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jh47v\" (UniqueName: \"kubernetes.io/projected/3a9aa1a5-880d-442a-bf77-e290e9acbe80-kube-api-access-jh47v\") pod \"nova-scheduler-0\" (UID: \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.672887 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a9aa1a5-880d-442a-bf77-e290e9acbe80-config-data\") pod \"nova-scheduler-0\" (UID: \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.674732 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a9aa1a5-880d-442a-bf77-e290e9acbe80-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.689872 4932 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"kube-api-access-jh47v\" (UniqueName: \"kubernetes.io/projected/3a9aa1a5-880d-442a-bf77-e290e9acbe80-kube-api-access-jh47v\") pod \"nova-scheduler-0\" (UID: \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.718520 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.738925 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.763805 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.770288 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-config\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.770339 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-ovsdbserver-sb\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.770474 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-ovsdbserver-nb\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.770503 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-dns-svc\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.770531 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pgd72\" (UniqueName: \"kubernetes.io/projected/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-kube-api-access-pgd72\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.773174 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-config\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.774111 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-ovsdbserver-nb\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.774239 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-ovsdbserver-sb\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.775086 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-dns-svc\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.802063 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pgd72\" (UniqueName: \"kubernetes.io/projected/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-kube-api-access-pgd72\") pod \"dnsmasq-dns-774d5f4bd7-zrqhp\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") " pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.826080 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:05 crc kubenswrapper[4932]: I1125 10:28:05.928482 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-fjmbp"] Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.027684 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:28:06 crc kubenswrapper[4932]: W1125 10:28:06.034553 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd35a8fd_f978_4863_98a7_9481c7ce10c9.slice/crio-daad0d4355bbfaf74613c3f09537c25994b36b60edff1b2c95dc1b833be19401 WatchSource:0}: Error finding container daad0d4355bbfaf74613c3f09537c25994b36b60edff1b2c95dc1b833be19401: Status 404 returned error can't find the container with id daad0d4355bbfaf74613c3f09537c25994b36b60edff1b2c95dc1b833be19401 Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.131136 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wklbb"] Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.132443 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.138265 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.139048 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.146437 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wklbb"] Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.182067 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-wklbb\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.182167 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmmpv\" (UniqueName: \"kubernetes.io/projected/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-kube-api-access-lmmpv\") pod \"nova-cell1-conductor-db-sync-wklbb\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.182261 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-config-data\") pod \"nova-cell1-conductor-db-sync-wklbb\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.182295 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-scripts\") pod \"nova-cell1-conductor-db-sync-wklbb\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.248123 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fd35a8fd-f978-4863-98a7-9481c7ce10c9","Type":"ContainerStarted","Data":"daad0d4355bbfaf74613c3f09537c25994b36b60edff1b2c95dc1b833be19401"} Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.252775 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fjmbp" event={"ID":"67c688f2-d79f-41e2-82d5-88b15fd52efd","Type":"ContainerStarted","Data":"54101920e575d2591fe6eef16b01205f0237dd6fa57f263405f679e0224152fb"} Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.283604 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-config-data\") pod \"nova-cell1-conductor-db-sync-wklbb\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.283655 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-scripts\") pod 
\"nova-cell1-conductor-db-sync-wklbb\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.284967 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-wklbb\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.286639 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-fjmbp" podStartSLOduration=2.286621163 podStartE2EDuration="2.286621163s" podCreationTimestamp="2025-11-25 10:28:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:06.278710506 +0000 UTC m=+5946.404740079" watchObservedRunningTime="2025-11-25 10:28:06.286621163 +0000 UTC m=+5946.412650726" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.287099 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmmpv\" (UniqueName: \"kubernetes.io/projected/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-kube-api-access-lmmpv\") pod \"nova-cell1-conductor-db-sync-wklbb\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.307278 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-wklbb\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.308910 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-config-data\") pod \"nova-cell1-conductor-db-sync-wklbb\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.313583 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-scripts\") pod \"nova-cell1-conductor-db-sync-wklbb\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.318411 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmmpv\" (UniqueName: \"kubernetes.io/projected/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-kube-api-access-lmmpv\") pod \"nova-cell1-conductor-db-sync-wklbb\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: W1125 10:28:06.326891 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0b673601_7051_4474_a475_bb8beb132e9e.slice/crio-6c816ec834508c1e2cdfba144d0a1c2781d97f1aa2d42311eb2878e10fae242d WatchSource:0}: Error finding container 6c816ec834508c1e2cdfba144d0a1c2781d97f1aa2d42311eb2878e10fae242d: Status 404 returned error can't find the 
container with id 6c816ec834508c1e2cdfba144d0a1c2781d97f1aa2d42311eb2878e10fae242d Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.327758 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.443858 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:28:06 crc kubenswrapper[4932]: W1125 10:28:06.444568 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a9aa1a5_880d_442a_bf77_e290e9acbe80.slice/crio-d845741920080002d0223cfaccce9ef639895c6ae24be8f1c7310db9c97c4bc3 WatchSource:0}: Error finding container d845741920080002d0223cfaccce9ef639895c6ae24be8f1c7310db9c97c4bc3: Status 404 returned error can't find the container with id d845741920080002d0223cfaccce9ef639895c6ae24be8f1c7310db9c97c4bc3 Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.465665 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:06 crc kubenswrapper[4932]: W1125 10:28:06.506893 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31cdf0e8_314d_4468_8aa6_b6626aba6dd0.slice/crio-e72cda0749ea7ac3e38cecdbc3c1ea29d284cfecc724bcc6ae4a32a08744f7ed WatchSource:0}: Error finding container e72cda0749ea7ac3e38cecdbc3c1ea29d284cfecc724bcc6ae4a32a08744f7ed: Status 404 returned error can't find the container with id e72cda0749ea7ac3e38cecdbc3c1ea29d284cfecc724bcc6ae4a32a08744f7ed Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.513976 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:28:06 crc kubenswrapper[4932]: I1125 10:28:06.533329 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-774d5f4bd7-zrqhp"] Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.029766 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wklbb"] Nov 25 10:28:07 crc kubenswrapper[4932]: W1125 10:28:07.038052 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb104cd1_6cb3_441f_a90b_abd9bbd76cd5.slice/crio-8894266a4be00c63896cf46b8c66d327b62fce18cf3482de38499ac7fa8a111d WatchSource:0}: Error finding container 8894266a4be00c63896cf46b8c66d327b62fce18cf3482de38499ac7fa8a111d: Status 404 returned error can't find the container with id 8894266a4be00c63896cf46b8c66d327b62fce18cf3482de38499ac7fa8a111d Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.271746 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-wklbb" event={"ID":"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5","Type":"ContainerStarted","Data":"8894266a4be00c63896cf46b8c66d327b62fce18cf3482de38499ac7fa8a111d"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.280258 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3a9aa1a5-880d-442a-bf77-e290e9acbe80","Type":"ContainerStarted","Data":"bb69ce3c7fbc919fbbb7fe2d6b0f77ce532a2c3e60d281184a27c6a26624ccef"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.280316 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"3a9aa1a5-880d-442a-bf77-e290e9acbe80","Type":"ContainerStarted","Data":"d845741920080002d0223cfaccce9ef639895c6ae24be8f1c7310db9c97c4bc3"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.287653 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fd35a8fd-f978-4863-98a7-9481c7ce10c9","Type":"ContainerStarted","Data":"994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.287732 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fd35a8fd-f978-4863-98a7-9481c7ce10c9","Type":"ContainerStarted","Data":"d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.292408 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fjmbp" event={"ID":"67c688f2-d79f-41e2-82d5-88b15fd52efd","Type":"ContainerStarted","Data":"1d6c62ab15a50b591f5c2b83752679505ad726bb12ee0a9c1db2bdd72d2e0b0a"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.298150 4932 generic.go:334] "Generic (PLEG): container finished" podID="519b4b37-e54b-400e-b6fb-2ecafb8b59fe" containerID="685337404dc9c6f4bc92b51f6a82ba5beed1ca22df709eed9986a60af8edbaf6" exitCode=0 Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.298239 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" event={"ID":"519b4b37-e54b-400e-b6fb-2ecafb8b59fe","Type":"ContainerDied","Data":"685337404dc9c6f4bc92b51f6a82ba5beed1ca22df709eed9986a60af8edbaf6"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.298265 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" event={"ID":"519b4b37-e54b-400e-b6fb-2ecafb8b59fe","Type":"ContainerStarted","Data":"c0fe358ffd28384489c82702febb846378ea50e4db35e47f65757f7b8028b0bc"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.304082 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.304058777 podStartE2EDuration="2.304058777s" podCreationTimestamp="2025-11-25 10:28:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:07.294064641 +0000 UTC m=+5947.420094194" watchObservedRunningTime="2025-11-25 10:28:07.304058777 +0000 UTC m=+5947.430088360" Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.306486 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"31cdf0e8-314d-4468-8aa6-b6626aba6dd0","Type":"ContainerStarted","Data":"397dc24d22e8b3892c16fc2da8df864f44f9f752fa35d732426aa86a0b95f2c6"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.306531 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"31cdf0e8-314d-4468-8aa6-b6626aba6dd0","Type":"ContainerStarted","Data":"e72cda0749ea7ac3e38cecdbc3c1ea29d284cfecc724bcc6ae4a32a08744f7ed"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.317665 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0b673601-7051-4474-a475-bb8beb132e9e","Type":"ContainerStarted","Data":"96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.317720 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-metadata-0" event={"ID":"0b673601-7051-4474-a475-bb8beb132e9e","Type":"ContainerStarted","Data":"c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.317732 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0b673601-7051-4474-a475-bb8beb132e9e","Type":"ContainerStarted","Data":"6c816ec834508c1e2cdfba144d0a1c2781d97f1aa2d42311eb2878e10fae242d"} Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.326994 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.326969514 podStartE2EDuration="2.326969514s" podCreationTimestamp="2025-11-25 10:28:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:07.311758178 +0000 UTC m=+5947.437787761" watchObservedRunningTime="2025-11-25 10:28:07.326969514 +0000 UTC m=+5947.452999077" Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.428930 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.428905285 podStartE2EDuration="2.428905285s" podCreationTimestamp="2025-11-25 10:28:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:07.339626676 +0000 UTC m=+5947.465656239" watchObservedRunningTime="2025-11-25 10:28:07.428905285 +0000 UTC m=+5947.554934838" Nov 25 10:28:07 crc kubenswrapper[4932]: I1125 10:28:07.479095 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.479065102 podStartE2EDuration="2.479065102s" podCreationTimestamp="2025-11-25 10:28:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:07.379150349 +0000 UTC m=+5947.505179912" watchObservedRunningTime="2025-11-25 10:28:07.479065102 +0000 UTC m=+5947.605094665" Nov 25 10:28:08 crc kubenswrapper[4932]: I1125 10:28:08.328936 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-wklbb" event={"ID":"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5","Type":"ContainerStarted","Data":"359d94849b99a181539f9853ec8b2dc8d84091d1ac4616d3ec7a5c0849c2289d"} Nov 25 10:28:08 crc kubenswrapper[4932]: I1125 10:28:08.336015 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" event={"ID":"519b4b37-e54b-400e-b6fb-2ecafb8b59fe","Type":"ContainerStarted","Data":"3023e7835d395628606ba55336c1a0d6272dbe01c002094e67b5bc6ceb74ada5"} Nov 25 10:28:08 crc kubenswrapper[4932]: I1125 10:28:08.356172 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-wklbb" podStartSLOduration=2.356148245 podStartE2EDuration="2.356148245s" podCreationTimestamp="2025-11-25 10:28:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:08.356044863 +0000 UTC m=+5948.482074426" watchObservedRunningTime="2025-11-25 10:28:08.356148245 +0000 UTC m=+5948.482177809" Nov 25 10:28:08 crc kubenswrapper[4932]: I1125 10:28:08.376580 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" 
podStartSLOduration=3.37655958 podStartE2EDuration="3.37655958s" podCreationTimestamp="2025-11-25 10:28:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:08.372138904 +0000 UTC m=+5948.498168467" watchObservedRunningTime="2025-11-25 10:28:08.37655958 +0000 UTC m=+5948.502589143" Nov 25 10:28:09 crc kubenswrapper[4932]: I1125 10:28:09.345677 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:09 crc kubenswrapper[4932]: I1125 10:28:09.602323 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:09 crc kubenswrapper[4932]: I1125 10:28:09.602569 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0b673601-7051-4474-a475-bb8beb132e9e" containerName="nova-metadata-log" containerID="cri-o://c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7" gracePeriod=30 Nov 25 10:28:09 crc kubenswrapper[4932]: I1125 10:28:09.602682 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0b673601-7051-4474-a475-bb8beb132e9e" containerName="nova-metadata-metadata" containerID="cri-o://96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec" gracePeriod=30 Nov 25 10:28:09 crc kubenswrapper[4932]: I1125 10:28:09.615782 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:28:09 crc kubenswrapper[4932]: I1125 10:28:09.616014 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="31cdf0e8-314d-4468-8aa6-b6626aba6dd0" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://397dc24d22e8b3892c16fc2da8df864f44f9f752fa35d732426aa86a0b95f2c6" gracePeriod=30 Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.162109 4932 scope.go:117] "RemoveContainer" containerID="8bd27c6914c81168f4782cb19a315139934faac137cfcc248c0f68c6ebf1d30c" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.196407 4932 scope.go:117] "RemoveContainer" containerID="971f030a2ab3f3af9c5b0c6f251a11332ec7e952101a7c4cb59b1d9fc5fc48e7" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.288627 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.289315 4932 scope.go:117] "RemoveContainer" containerID="2533e36145f9f9cf15754cfc4397a73ea7ec85b63f64dab9590b5b0674f183e7" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.363854 4932 generic.go:334] "Generic (PLEG): container finished" podID="31cdf0e8-314d-4468-8aa6-b6626aba6dd0" containerID="397dc24d22e8b3892c16fc2da8df864f44f9f752fa35d732426aa86a0b95f2c6" exitCode=0 Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.363915 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"31cdf0e8-314d-4468-8aa6-b6626aba6dd0","Type":"ContainerDied","Data":"397dc24d22e8b3892c16fc2da8df864f44f9f752fa35d732426aa86a0b95f2c6"} Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.368683 4932 generic.go:334] "Generic (PLEG): container finished" podID="0b673601-7051-4474-a475-bb8beb132e9e" containerID="96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec" exitCode=0 Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.368708 4932 generic.go:334] "Generic (PLEG): container finished" podID="0b673601-7051-4474-a475-bb8beb132e9e" containerID="c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7" exitCode=143 Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.368830 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0b673601-7051-4474-a475-bb8beb132e9e","Type":"ContainerDied","Data":"96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec"} Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.368881 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0b673601-7051-4474-a475-bb8beb132e9e","Type":"ContainerDied","Data":"c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7"} Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.368896 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0b673601-7051-4474-a475-bb8beb132e9e","Type":"ContainerDied","Data":"6c816ec834508c1e2cdfba144d0a1c2781d97f1aa2d42311eb2878e10fae242d"} Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.368917 4932 scope.go:117] "RemoveContainer" containerID="96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.370238 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.407825 4932 scope.go:117] "RemoveContainer" containerID="c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.422610 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b673601-7051-4474-a475-bb8beb132e9e-combined-ca-bundle\") pod \"0b673601-7051-4474-a475-bb8beb132e9e\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.422785 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b673601-7051-4474-a475-bb8beb132e9e-logs\") pod \"0b673601-7051-4474-a475-bb8beb132e9e\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.422835 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b673601-7051-4474-a475-bb8beb132e9e-config-data\") pod \"0b673601-7051-4474-a475-bb8beb132e9e\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.422880 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2m8g\" (UniqueName: \"kubernetes.io/projected/0b673601-7051-4474-a475-bb8beb132e9e-kube-api-access-p2m8g\") pod \"0b673601-7051-4474-a475-bb8beb132e9e\" (UID: \"0b673601-7051-4474-a475-bb8beb132e9e\") " Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.425163 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b673601-7051-4474-a475-bb8beb132e9e-logs" (OuterVolumeSpecName: "logs") pod "0b673601-7051-4474-a475-bb8beb132e9e" (UID: "0b673601-7051-4474-a475-bb8beb132e9e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.431937 4932 scope.go:117] "RemoveContainer" containerID="96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.432219 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b673601-7051-4474-a475-bb8beb132e9e-kube-api-access-p2m8g" (OuterVolumeSpecName: "kube-api-access-p2m8g") pod "0b673601-7051-4474-a475-bb8beb132e9e" (UID: "0b673601-7051-4474-a475-bb8beb132e9e"). InnerVolumeSpecName "kube-api-access-p2m8g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:10 crc kubenswrapper[4932]: E1125 10:28:10.434176 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec\": container with ID starting with 96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec not found: ID does not exist" containerID="96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.434251 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec"} err="failed to get container status \"96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec\": rpc error: code = NotFound desc = could not find container \"96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec\": container with ID starting with 96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec not found: ID does not exist" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.434294 4932 scope.go:117] "RemoveContainer" containerID="c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7" Nov 25 10:28:10 crc kubenswrapper[4932]: E1125 10:28:10.434765 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7\": container with ID starting with c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7 not found: ID does not exist" containerID="c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.434789 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7"} err="failed to get container status \"c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7\": rpc error: code = NotFound desc = could not find container \"c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7\": container with ID starting with c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7 not found: ID does not exist" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.434803 4932 scope.go:117] "RemoveContainer" containerID="96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.435151 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec"} err="failed to get container status \"96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec\": rpc error: code = NotFound desc = could not find container \"96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec\": container with ID starting with 96fe933198aa52f92c6fb83fd5f2ccdd04f532f96c5d8c751ce705f1eb4a8cec not found: ID does not exist" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.435205 4932 scope.go:117] "RemoveContainer" containerID="c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.435435 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7"} err="failed to get 
container status \"c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7\": rpc error: code = NotFound desc = could not find container \"c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7\": container with ID starting with c9a063dc1387a24d377306ead964c07c5188f3f3dac6b993ddf4f818c3cb64f7 not found: ID does not exist" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.465839 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b673601-7051-4474-a475-bb8beb132e9e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0b673601-7051-4474-a475-bb8beb132e9e" (UID: "0b673601-7051-4474-a475-bb8beb132e9e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.477165 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b673601-7051-4474-a475-bb8beb132e9e-config-data" (OuterVolumeSpecName: "config-data") pod "0b673601-7051-4474-a475-bb8beb132e9e" (UID: "0b673601-7051-4474-a475-bb8beb132e9e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.525901 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b673601-7051-4474-a475-bb8beb132e9e-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.525930 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b673601-7051-4474-a475-bb8beb132e9e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.525941 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2m8g\" (UniqueName: \"kubernetes.io/projected/0b673601-7051-4474-a475-bb8beb132e9e-kube-api-access-p2m8g\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.525954 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b673601-7051-4474-a475-bb8beb132e9e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.526757 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.616335 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:28:10 crc kubenswrapper[4932]: E1125 10:28:10.616677 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.627001 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mlrf\" (UniqueName: \"kubernetes.io/projected/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-kube-api-access-4mlrf\") pod \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\" (UID: \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\") " Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.627275 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-combined-ca-bundle\") pod \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\" (UID: \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\") " Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.627376 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-config-data\") pod \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\" (UID: \"31cdf0e8-314d-4468-8aa6-b6626aba6dd0\") " Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.631431 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-kube-api-access-4mlrf" (OuterVolumeSpecName: "kube-api-access-4mlrf") pod "31cdf0e8-314d-4468-8aa6-b6626aba6dd0" (UID: "31cdf0e8-314d-4468-8aa6-b6626aba6dd0"). InnerVolumeSpecName "kube-api-access-4mlrf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.659142 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-config-data" (OuterVolumeSpecName: "config-data") pod "31cdf0e8-314d-4468-8aa6-b6626aba6dd0" (UID: "31cdf0e8-314d-4468-8aa6-b6626aba6dd0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.661295 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "31cdf0e8-314d-4468-8aa6-b6626aba6dd0" (UID: "31cdf0e8-314d-4468-8aa6-b6626aba6dd0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.697129 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.708997 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.722357 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:10 crc kubenswrapper[4932]: E1125 10:28:10.722888 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b673601-7051-4474-a475-bb8beb132e9e" containerName="nova-metadata-log" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.722906 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b673601-7051-4474-a475-bb8beb132e9e" containerName="nova-metadata-log" Nov 25 10:28:10 crc kubenswrapper[4932]: E1125 10:28:10.722953 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b673601-7051-4474-a475-bb8beb132e9e" containerName="nova-metadata-metadata" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.722961 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b673601-7051-4474-a475-bb8beb132e9e" containerName="nova-metadata-metadata" Nov 25 10:28:10 crc kubenswrapper[4932]: E1125 10:28:10.722973 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31cdf0e8-314d-4468-8aa6-b6626aba6dd0" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.722981 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="31cdf0e8-314d-4468-8aa6-b6626aba6dd0" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.723241 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="31cdf0e8-314d-4468-8aa6-b6626aba6dd0" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.723264 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b673601-7051-4474-a475-bb8beb132e9e" containerName="nova-metadata-metadata" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.723276 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b673601-7051-4474-a475-bb8beb132e9e" containerName="nova-metadata-log" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.724394 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.726888 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.727383 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.729964 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4mlrf\" (UniqueName: \"kubernetes.io/projected/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-kube-api-access-4mlrf\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.729993 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.730008 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31cdf0e8-314d-4468-8aa6-b6626aba6dd0-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.750735 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.765065 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.831455 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.831532 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9wbc4\" (UniqueName: \"kubernetes.io/projected/859a89a7-6a7f-4699-a2e4-db62b1585b92-kube-api-access-9wbc4\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.831573 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-config-data\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.831595 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.831611 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/859a89a7-6a7f-4699-a2e4-db62b1585b92-logs\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.933370 4932 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-9wbc4\" (UniqueName: \"kubernetes.io/projected/859a89a7-6a7f-4699-a2e4-db62b1585b92-kube-api-access-9wbc4\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.934502 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-config-data\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.935128 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.935170 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/859a89a7-6a7f-4699-a2e4-db62b1585b92-logs\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.935437 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.935710 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/859a89a7-6a7f-4699-a2e4-db62b1585b92-logs\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.949942 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.950115 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.951397 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-config-data\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:10 crc kubenswrapper[4932]: I1125 10:28:10.952737 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9wbc4\" (UniqueName: \"kubernetes.io/projected/859a89a7-6a7f-4699-a2e4-db62b1585b92-kube-api-access-9wbc4\") pod \"nova-metadata-0\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " pod="openstack/nova-metadata-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 
10:28:11.097591 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.382761 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"31cdf0e8-314d-4468-8aa6-b6626aba6dd0","Type":"ContainerDied","Data":"e72cda0749ea7ac3e38cecdbc3c1ea29d284cfecc724bcc6ae4a32a08744f7ed"} Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.382946 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.383130 4932 scope.go:117] "RemoveContainer" containerID="397dc24d22e8b3892c16fc2da8df864f44f9f752fa35d732426aa86a0b95f2c6" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.386450 4932 generic.go:334] "Generic (PLEG): container finished" podID="bb104cd1-6cb3-441f-a90b-abd9bbd76cd5" containerID="359d94849b99a181539f9853ec8b2dc8d84091d1ac4616d3ec7a5c0849c2289d" exitCode=0 Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.386516 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-wklbb" event={"ID":"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5","Type":"ContainerDied","Data":"359d94849b99a181539f9853ec8b2dc8d84091d1ac4616d3ec7a5c0849c2289d"} Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.446357 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.461076 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.475071 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.476420 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.478521 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.478950 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.479505 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.490616 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.550388 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zxgv\" (UniqueName: \"kubernetes.io/projected/1c34af3f-8bfa-463b-a5ea-5925a663a624-kube-api-access-4zxgv\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.550483 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c34af3f-8bfa-463b-a5ea-5925a663a624-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.550510 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c34af3f-8bfa-463b-a5ea-5925a663a624-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.550532 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c34af3f-8bfa-463b-a5ea-5925a663a624-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.550806 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c34af3f-8bfa-463b-a5ea-5925a663a624-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.553920 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:11 crc kubenswrapper[4932]: W1125 10:28:11.557079 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod859a89a7_6a7f_4699_a2e4_db62b1585b92.slice/crio-f2a9b0ae699a6db75a02d24c877fd3d1d62e8ac2cef70dace033bad50fac6b70 WatchSource:0}: Error finding container f2a9b0ae699a6db75a02d24c877fd3d1d62e8ac2cef70dace033bad50fac6b70: Status 404 returned error can't find the container with id f2a9b0ae699a6db75a02d24c877fd3d1d62e8ac2cef70dace033bad50fac6b70 Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.652381 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c34af3f-8bfa-463b-a5ea-5925a663a624-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.652495 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zxgv\" (UniqueName: \"kubernetes.io/projected/1c34af3f-8bfa-463b-a5ea-5925a663a624-kube-api-access-4zxgv\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.652567 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c34af3f-8bfa-463b-a5ea-5925a663a624-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.652590 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c34af3f-8bfa-463b-a5ea-5925a663a624-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.652622 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c34af3f-8bfa-463b-a5ea-5925a663a624-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.661142 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c34af3f-8bfa-463b-a5ea-5925a663a624-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.661426 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c34af3f-8bfa-463b-a5ea-5925a663a624-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.661944 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c34af3f-8bfa-463b-a5ea-5925a663a624-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.663313 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/1c34af3f-8bfa-463b-a5ea-5925a663a624-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.670310 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zxgv\" (UniqueName: 
\"kubernetes.io/projected/1c34af3f-8bfa-463b-a5ea-5925a663a624-kube-api-access-4zxgv\") pod \"nova-cell1-novncproxy-0\" (UID: \"1c34af3f-8bfa-463b-a5ea-5925a663a624\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:11 crc kubenswrapper[4932]: I1125 10:28:11.801898 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.227097 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 10:28:12 crc kubenswrapper[4932]: W1125 10:28:12.236093 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c34af3f_8bfa_463b_a5ea_5925a663a624.slice/crio-c4f86438fbbf7be9dc5927eaaec61a1142e33c15280416d65ee755edb9ea06e1 WatchSource:0}: Error finding container c4f86438fbbf7be9dc5927eaaec61a1142e33c15280416d65ee755edb9ea06e1: Status 404 returned error can't find the container with id c4f86438fbbf7be9dc5927eaaec61a1142e33c15280416d65ee755edb9ea06e1 Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.400369 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"859a89a7-6a7f-4699-a2e4-db62b1585b92","Type":"ContainerStarted","Data":"e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a"} Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.400780 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"859a89a7-6a7f-4699-a2e4-db62b1585b92","Type":"ContainerStarted","Data":"fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b"} Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.400793 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"859a89a7-6a7f-4699-a2e4-db62b1585b92","Type":"ContainerStarted","Data":"f2a9b0ae699a6db75a02d24c877fd3d1d62e8ac2cef70dace033bad50fac6b70"} Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.402245 4932 generic.go:334] "Generic (PLEG): container finished" podID="67c688f2-d79f-41e2-82d5-88b15fd52efd" containerID="1d6c62ab15a50b591f5c2b83752679505ad726bb12ee0a9c1db2bdd72d2e0b0a" exitCode=0 Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.402304 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fjmbp" event={"ID":"67c688f2-d79f-41e2-82d5-88b15fd52efd","Type":"ContainerDied","Data":"1d6c62ab15a50b591f5c2b83752679505ad726bb12ee0a9c1db2bdd72d2e0b0a"} Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.412467 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"1c34af3f-8bfa-463b-a5ea-5925a663a624","Type":"ContainerStarted","Data":"c4f86438fbbf7be9dc5927eaaec61a1142e33c15280416d65ee755edb9ea06e1"} Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.436178 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.436157122 podStartE2EDuration="2.436157122s" podCreationTimestamp="2025-11-25 10:28:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:12.419305799 +0000 UTC m=+5952.545335362" watchObservedRunningTime="2025-11-25 10:28:12.436157122 +0000 UTC m=+5952.562186685" Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.620019 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="0b673601-7051-4474-a475-bb8beb132e9e" path="/var/lib/kubelet/pods/0b673601-7051-4474-a475-bb8beb132e9e/volumes" Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.620697 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31cdf0e8-314d-4468-8aa6-b6626aba6dd0" path="/var/lib/kubelet/pods/31cdf0e8-314d-4468-8aa6-b6626aba6dd0/volumes" Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.672818 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.774122 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-config-data\") pod \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.774354 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmmpv\" (UniqueName: \"kubernetes.io/projected/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-kube-api-access-lmmpv\") pod \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.774583 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-combined-ca-bundle\") pod \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.774792 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-scripts\") pod \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\" (UID: \"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5\") " Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.780370 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-scripts" (OuterVolumeSpecName: "scripts") pod "bb104cd1-6cb3-441f-a90b-abd9bbd76cd5" (UID: "bb104cd1-6cb3-441f-a90b-abd9bbd76cd5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.780491 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-kube-api-access-lmmpv" (OuterVolumeSpecName: "kube-api-access-lmmpv") pod "bb104cd1-6cb3-441f-a90b-abd9bbd76cd5" (UID: "bb104cd1-6cb3-441f-a90b-abd9bbd76cd5"). InnerVolumeSpecName "kube-api-access-lmmpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.801934 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bb104cd1-6cb3-441f-a90b-abd9bbd76cd5" (UID: "bb104cd1-6cb3-441f-a90b-abd9bbd76cd5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.802525 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-config-data" (OuterVolumeSpecName: "config-data") pod "bb104cd1-6cb3-441f-a90b-abd9bbd76cd5" (UID: "bb104cd1-6cb3-441f-a90b-abd9bbd76cd5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.876993 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.877472 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.877539 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:12 crc kubenswrapper[4932]: I1125 10:28:12.877600 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmmpv\" (UniqueName: \"kubernetes.io/projected/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5-kube-api-access-lmmpv\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.427735 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"1c34af3f-8bfa-463b-a5ea-5925a663a624","Type":"ContainerStarted","Data":"3480a333658ceae498ba6d6f1c71b5d9214711b450e12ab331f6704d54e1edb3"} Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.429213 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-wklbb" event={"ID":"bb104cd1-6cb3-441f-a90b-abd9bbd76cd5","Type":"ContainerDied","Data":"8894266a4be00c63896cf46b8c66d327b62fce18cf3482de38499ac7fa8a111d"} Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.429265 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8894266a4be00c63896cf46b8c66d327b62fce18cf3482de38499ac7fa8a111d" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.429337 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-wklbb" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.460481 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.460460514 podStartE2EDuration="2.460460514s" podCreationTimestamp="2025-11-25 10:28:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:13.456083139 +0000 UTC m=+5953.582112702" watchObservedRunningTime="2025-11-25 10:28:13.460460514 +0000 UTC m=+5953.586490077" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.479462 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 10:28:13 crc kubenswrapper[4932]: E1125 10:28:13.479849 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb104cd1-6cb3-441f-a90b-abd9bbd76cd5" containerName="nova-cell1-conductor-db-sync" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.479867 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb104cd1-6cb3-441f-a90b-abd9bbd76cd5" containerName="nova-cell1-conductor-db-sync" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.480073 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb104cd1-6cb3-441f-a90b-abd9bbd76cd5" containerName="nova-cell1-conductor-db-sync" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.480864 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.483467 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.501201 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.591963 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e722fab9-f837-44c6-b157-4a79a84c82a6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e722fab9-f837-44c6-b157-4a79a84c82a6\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.592331 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6cjr\" (UniqueName: \"kubernetes.io/projected/e722fab9-f837-44c6-b157-4a79a84c82a6-kube-api-access-d6cjr\") pod \"nova-cell1-conductor-0\" (UID: \"e722fab9-f837-44c6-b157-4a79a84c82a6\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.592467 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e722fab9-f837-44c6-b157-4a79a84c82a6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e722fab9-f837-44c6-b157-4a79a84c82a6\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.694988 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e722fab9-f837-44c6-b157-4a79a84c82a6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e722fab9-f837-44c6-b157-4a79a84c82a6\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:13 crc kubenswrapper[4932]: 
I1125 10:28:13.695152 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e722fab9-f837-44c6-b157-4a79a84c82a6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e722fab9-f837-44c6-b157-4a79a84c82a6\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.695289 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6cjr\" (UniqueName: \"kubernetes.io/projected/e722fab9-f837-44c6-b157-4a79a84c82a6-kube-api-access-d6cjr\") pod \"nova-cell1-conductor-0\" (UID: \"e722fab9-f837-44c6-b157-4a79a84c82a6\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.710338 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e722fab9-f837-44c6-b157-4a79a84c82a6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e722fab9-f837-44c6-b157-4a79a84c82a6\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.710499 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e722fab9-f837-44c6-b157-4a79a84c82a6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e722fab9-f837-44c6-b157-4a79a84c82a6\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.712746 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6cjr\" (UniqueName: \"kubernetes.io/projected/e722fab9-f837-44c6-b157-4a79a84c82a6-kube-api-access-d6cjr\") pod \"nova-cell1-conductor-0\" (UID: \"e722fab9-f837-44c6-b157-4a79a84c82a6\") " pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.802332 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.805288 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.899916 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-config-data\") pod \"67c688f2-d79f-41e2-82d5-88b15fd52efd\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.899962 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bl67x\" (UniqueName: \"kubernetes.io/projected/67c688f2-d79f-41e2-82d5-88b15fd52efd-kube-api-access-bl67x\") pod \"67c688f2-d79f-41e2-82d5-88b15fd52efd\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.900112 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-scripts\") pod \"67c688f2-d79f-41e2-82d5-88b15fd52efd\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.900166 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-combined-ca-bundle\") pod \"67c688f2-d79f-41e2-82d5-88b15fd52efd\" (UID: \"67c688f2-d79f-41e2-82d5-88b15fd52efd\") " Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.904331 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67c688f2-d79f-41e2-82d5-88b15fd52efd-kube-api-access-bl67x" (OuterVolumeSpecName: "kube-api-access-bl67x") pod "67c688f2-d79f-41e2-82d5-88b15fd52efd" (UID: "67c688f2-d79f-41e2-82d5-88b15fd52efd"). InnerVolumeSpecName "kube-api-access-bl67x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.909384 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-scripts" (OuterVolumeSpecName: "scripts") pod "67c688f2-d79f-41e2-82d5-88b15fd52efd" (UID: "67c688f2-d79f-41e2-82d5-88b15fd52efd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.932991 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "67c688f2-d79f-41e2-82d5-88b15fd52efd" (UID: "67c688f2-d79f-41e2-82d5-88b15fd52efd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:13 crc kubenswrapper[4932]: I1125 10:28:13.935817 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-config-data" (OuterVolumeSpecName: "config-data") pod "67c688f2-d79f-41e2-82d5-88b15fd52efd" (UID: "67c688f2-d79f-41e2-82d5-88b15fd52efd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.002582 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.002616 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.002628 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c688f2-d79f-41e2-82d5-88b15fd52efd-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.002638 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bl67x\" (UniqueName: \"kubernetes.io/projected/67c688f2-d79f-41e2-82d5-88b15fd52efd-kube-api-access-bl67x\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.251343 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 10:28:14 crc kubenswrapper[4932]: W1125 10:28:14.251834 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode722fab9_f837_44c6_b157_4a79a84c82a6.slice/crio-daaf26138d3a3204638462639ec16c525beb1c9517ff638f514321e6822e6279 WatchSource:0}: Error finding container daaf26138d3a3204638462639ec16c525beb1c9517ff638f514321e6822e6279: Status 404 returned error can't find the container with id daaf26138d3a3204638462639ec16c525beb1c9517ff638f514321e6822e6279 Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.439706 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-fjmbp" event={"ID":"67c688f2-d79f-41e2-82d5-88b15fd52efd","Type":"ContainerDied","Data":"54101920e575d2591fe6eef16b01205f0237dd6fa57f263405f679e0224152fb"} Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.439762 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54101920e575d2591fe6eef16b01205f0237dd6fa57f263405f679e0224152fb" Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.439734 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-fjmbp" Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.442783 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e722fab9-f837-44c6-b157-4a79a84c82a6","Type":"ContainerStarted","Data":"db1735752ec4ecc9a852cfcc85aea5f610ba42de4f633a3299e60e63943363eb"} Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.442833 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e722fab9-f837-44c6-b157-4a79a84c82a6","Type":"ContainerStarted","Data":"daaf26138d3a3204638462639ec16c525beb1c9517ff638f514321e6822e6279"} Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.466480 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=1.46644648 podStartE2EDuration="1.46644648s" podCreationTimestamp="2025-11-25 10:28:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:14.456925068 +0000 UTC m=+5954.582954641" watchObservedRunningTime="2025-11-25 10:28:14.46644648 +0000 UTC m=+5954.592476043" Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.625699 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.625943 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="fd35a8fd-f978-4863-98a7-9481c7ce10c9" containerName="nova-api-log" containerID="cri-o://d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993" gracePeriod=30 Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.626614 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="fd35a8fd-f978-4863-98a7-9481c7ce10c9" containerName="nova-api-api" containerID="cri-o://994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7" gracePeriod=30 Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.628308 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.628553 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="3a9aa1a5-880d-442a-bf77-e290e9acbe80" containerName="nova-scheduler-scheduler" containerID="cri-o://bb69ce3c7fbc919fbbb7fe2d6b0f77ce532a2c3e60d281184a27c6a26624ccef" gracePeriod=30 Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.650624 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.651046 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="859a89a7-6a7f-4699-a2e4-db62b1585b92" containerName="nova-metadata-log" containerID="cri-o://fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b" gracePeriod=30 Nov 25 10:28:14 crc kubenswrapper[4932]: I1125 10:28:14.651135 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="859a89a7-6a7f-4699-a2e4-db62b1585b92" containerName="nova-metadata-metadata" containerID="cri-o://e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a" gracePeriod=30 Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.249899 4932 util.go:48] "No ready sandbox for 
pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.255473 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.330769 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-nova-metadata-tls-certs\") pod \"859a89a7-6a7f-4699-a2e4-db62b1585b92\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.330823 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd35a8fd-f978-4863-98a7-9481c7ce10c9-combined-ca-bundle\") pod \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.330860 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd35a8fd-f978-4863-98a7-9481c7ce10c9-config-data\") pod \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.330922 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/859a89a7-6a7f-4699-a2e4-db62b1585b92-logs\") pod \"859a89a7-6a7f-4699-a2e4-db62b1585b92\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.331550 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/859a89a7-6a7f-4699-a2e4-db62b1585b92-logs" (OuterVolumeSpecName: "logs") pod "859a89a7-6a7f-4699-a2e4-db62b1585b92" (UID: "859a89a7-6a7f-4699-a2e4-db62b1585b92"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.331660 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd35a8fd-f978-4863-98a7-9481c7ce10c9-logs\") pod \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.331954 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-combined-ca-bundle\") pod \"859a89a7-6a7f-4699-a2e4-db62b1585b92\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.332353 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9wbc4\" (UniqueName: \"kubernetes.io/projected/859a89a7-6a7f-4699-a2e4-db62b1585b92-kube-api-access-9wbc4\") pod \"859a89a7-6a7f-4699-a2e4-db62b1585b92\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.332472 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-config-data\") pod \"859a89a7-6a7f-4699-a2e4-db62b1585b92\" (UID: \"859a89a7-6a7f-4699-a2e4-db62b1585b92\") " Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.332506 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fk4l2\" (UniqueName: \"kubernetes.io/projected/fd35a8fd-f978-4863-98a7-9481c7ce10c9-kube-api-access-fk4l2\") pod \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\" (UID: \"fd35a8fd-f978-4863-98a7-9481c7ce10c9\") " Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.331891 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd35a8fd-f978-4863-98a7-9481c7ce10c9-logs" (OuterVolumeSpecName: "logs") pod "fd35a8fd-f978-4863-98a7-9481c7ce10c9" (UID: "fd35a8fd-f978-4863-98a7-9481c7ce10c9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.333314 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd35a8fd-f978-4863-98a7-9481c7ce10c9-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.333336 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/859a89a7-6a7f-4699-a2e4-db62b1585b92-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.336521 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd35a8fd-f978-4863-98a7-9481c7ce10c9-kube-api-access-fk4l2" (OuterVolumeSpecName: "kube-api-access-fk4l2") pod "fd35a8fd-f978-4863-98a7-9481c7ce10c9" (UID: "fd35a8fd-f978-4863-98a7-9481c7ce10c9"). InnerVolumeSpecName "kube-api-access-fk4l2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.351076 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/859a89a7-6a7f-4699-a2e4-db62b1585b92-kube-api-access-9wbc4" (OuterVolumeSpecName: "kube-api-access-9wbc4") pod "859a89a7-6a7f-4699-a2e4-db62b1585b92" (UID: "859a89a7-6a7f-4699-a2e4-db62b1585b92"). InnerVolumeSpecName "kube-api-access-9wbc4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.357723 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-config-data" (OuterVolumeSpecName: "config-data") pod "859a89a7-6a7f-4699-a2e4-db62b1585b92" (UID: "859a89a7-6a7f-4699-a2e4-db62b1585b92"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.357915 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd35a8fd-f978-4863-98a7-9481c7ce10c9-config-data" (OuterVolumeSpecName: "config-data") pod "fd35a8fd-f978-4863-98a7-9481c7ce10c9" (UID: "fd35a8fd-f978-4863-98a7-9481c7ce10c9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.359085 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "859a89a7-6a7f-4699-a2e4-db62b1585b92" (UID: "859a89a7-6a7f-4699-a2e4-db62b1585b92"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.362247 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fd35a8fd-f978-4863-98a7-9481c7ce10c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fd35a8fd-f978-4863-98a7-9481c7ce10c9" (UID: "fd35a8fd-f978-4863-98a7-9481c7ce10c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.384493 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "859a89a7-6a7f-4699-a2e4-db62b1585b92" (UID: "859a89a7-6a7f-4699-a2e4-db62b1585b92"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.435940 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.436293 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9wbc4\" (UniqueName: \"kubernetes.io/projected/859a89a7-6a7f-4699-a2e4-db62b1585b92-kube-api-access-9wbc4\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.436305 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.436314 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fk4l2\" (UniqueName: \"kubernetes.io/projected/fd35a8fd-f978-4863-98a7-9481c7ce10c9-kube-api-access-fk4l2\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.436324 4932 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/859a89a7-6a7f-4699-a2e4-db62b1585b92-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.436337 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd35a8fd-f978-4863-98a7-9481c7ce10c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.436372 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd35a8fd-f978-4863-98a7-9481c7ce10c9-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.455334 4932 generic.go:334] "Generic (PLEG): container finished" podID="859a89a7-6a7f-4699-a2e4-db62b1585b92" containerID="e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a" exitCode=0 Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.455361 4932 generic.go:334] "Generic (PLEG): container finished" podID="859a89a7-6a7f-4699-a2e4-db62b1585b92" containerID="fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b" exitCode=143 Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.455397 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"859a89a7-6a7f-4699-a2e4-db62b1585b92","Type":"ContainerDied","Data":"e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a"} Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.455425 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"859a89a7-6a7f-4699-a2e4-db62b1585b92","Type":"ContainerDied","Data":"fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b"} Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.455435 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"859a89a7-6a7f-4699-a2e4-db62b1585b92","Type":"ContainerDied","Data":"f2a9b0ae699a6db75a02d24c877fd3d1d62e8ac2cef70dace033bad50fac6b70"} Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.455450 4932 scope.go:117] "RemoveContainer" containerID="e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a" 
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.455566 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.461993 4932 generic.go:334] "Generic (PLEG): container finished" podID="fd35a8fd-f978-4863-98a7-9481c7ce10c9" containerID="994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7" exitCode=0 Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.462025 4932 generic.go:334] "Generic (PLEG): container finished" podID="fd35a8fd-f978-4863-98a7-9481c7ce10c9" containerID="d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993" exitCode=143 Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.462057 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.462112 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fd35a8fd-f978-4863-98a7-9481c7ce10c9","Type":"ContainerDied","Data":"994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7"} Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.462160 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fd35a8fd-f978-4863-98a7-9481c7ce10c9","Type":"ContainerDied","Data":"d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993"} Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.462173 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"fd35a8fd-f978-4863-98a7-9481c7ce10c9","Type":"ContainerDied","Data":"daad0d4355bbfaf74613c3f09537c25994b36b60edff1b2c95dc1b833be19401"} Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.462448 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.492709 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.500373 4932 scope.go:117] "RemoveContainer" containerID="fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.523248 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.533409 4932 scope.go:117] "RemoveContainer" containerID="e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.536282 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:28:15 crc kubenswrapper[4932]: E1125 10:28:15.538666 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a\": container with ID starting with e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a not found: ID does not exist" containerID="e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.538731 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a"} err="failed to get container status \"e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a\": rpc error: code = NotFound 
desc = could not find container \"e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a\": container with ID starting with e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a not found: ID does not exist" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.538766 4932 scope.go:117] "RemoveContainer" containerID="fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b" Nov 25 10:28:15 crc kubenswrapper[4932]: E1125 10:28:15.543751 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b\": container with ID starting with fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b not found: ID does not exist" containerID="fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.543840 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b"} err="failed to get container status \"fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b\": rpc error: code = NotFound desc = could not find container \"fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b\": container with ID starting with fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b not found: ID does not exist" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.543877 4932 scope.go:117] "RemoveContainer" containerID="e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.544282 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a"} err="failed to get container status \"e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a\": rpc error: code = NotFound desc = could not find container \"e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a\": container with ID starting with e70cd8da5672d73e8f3be004204867f5a22dab51e3af1e8d5dc174bc59c86c7a not found: ID does not exist" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.544311 4932 scope.go:117] "RemoveContainer" containerID="fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.549943 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b"} err="failed to get container status \"fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b\": rpc error: code = NotFound desc = could not find container \"fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b\": container with ID starting with fe1ece609e57103eb1611fcdbd0ad5ccb87b32fec29ad8e660fe1980b3f41b7b not found: ID does not exist" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.550007 4932 scope.go:117] "RemoveContainer" containerID="994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.594119 4932 scope.go:117] "RemoveContainer" containerID="d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.594377 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:15 crc 
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.594875 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="67c688f2-d79f-41e2-82d5-88b15fd52efd" containerName="nova-manage"
Nov 25 10:28:15 crc kubenswrapper[4932]: E1125 10:28:15.594904 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd35a8fd-f978-4863-98a7-9481c7ce10c9" containerName="nova-api-api"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.594911 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd35a8fd-f978-4863-98a7-9481c7ce10c9" containerName="nova-api-api"
Nov 25 10:28:15 crc kubenswrapper[4932]: E1125 10:28:15.594922 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="859a89a7-6a7f-4699-a2e4-db62b1585b92" containerName="nova-metadata-log"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.594928 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="859a89a7-6a7f-4699-a2e4-db62b1585b92" containerName="nova-metadata-log"
Nov 25 10:28:15 crc kubenswrapper[4932]: E1125 10:28:15.594937 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="859a89a7-6a7f-4699-a2e4-db62b1585b92" containerName="nova-metadata-metadata"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.594943 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="859a89a7-6a7f-4699-a2e4-db62b1585b92" containerName="nova-metadata-metadata"
Nov 25 10:28:15 crc kubenswrapper[4932]: E1125 10:28:15.594957 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd35a8fd-f978-4863-98a7-9481c7ce10c9" containerName="nova-api-log"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.594963 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd35a8fd-f978-4863-98a7-9481c7ce10c9" containerName="nova-api-log"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.595144 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd35a8fd-f978-4863-98a7-9481c7ce10c9" containerName="nova-api-api"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.595162 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="859a89a7-6a7f-4699-a2e4-db62b1585b92" containerName="nova-metadata-metadata"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.595178 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="67c688f2-d79f-41e2-82d5-88b15fd52efd" containerName="nova-manage"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.595206 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="859a89a7-6a7f-4699-a2e4-db62b1585b92" containerName="nova-metadata-log"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.595213 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd35a8fd-f978-4863-98a7-9481c7ce10c9" containerName="nova-api-log"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.596351 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.598765 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.599822 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.605323 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.618920 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.627255 4932 scope.go:117] "RemoveContainer" containerID="994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.627396 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:28:15 crc kubenswrapper[4932]: E1125 10:28:15.628693 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7\": container with ID starting with 994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7 not found: ID does not exist" containerID="994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.628763 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7"} err="failed to get container status \"994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7\": rpc error: code = NotFound desc = could not find container \"994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7\": container with ID starting with 994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7 not found: ID does not exist"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.628805 4932 scope.go:117] "RemoveContainer" containerID="d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.629339 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:28:15 crc kubenswrapper[4932]: E1125 10:28:15.629519 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993\": container with ID starting with d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993 not found: ID does not exist" containerID="d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.629568 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993"} err="failed to get container status \"d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993\": rpc error: code = NotFound desc = could not find container \"d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993\": container with ID starting with d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993 not found: ID does not exist"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.629627 4932 scope.go:117] "RemoveContainer" containerID="994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.631324 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7"} err="failed to get container status \"994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7\": rpc error: code = NotFound desc = could not find container \"994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7\": container with ID starting with 994fc4218648c24e3828178154d0b08912dcf8746ef8cb33a317d493f4c545f7 not found: ID does not exist"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.631357 4932 scope.go:117] "RemoveContainer" containerID="d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.631633 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993"} err="failed to get container status \"d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993\": rpc error: code = NotFound desc = could not find container \"d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993\": container with ID starting with d737cbb7d4d30960720df9dde5d7d3a30bc1ea25e2ced7bd28b7ea662ebbf993 not found: ID does not exist"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.632479 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.635455 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.648762 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.648818 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-logs\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0"
\"kubernetes.io/empty-dir/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-logs\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.648878 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhv52\" (UniqueName: \"kubernetes.io/projected/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-kube-api-access-vhv52\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.648915 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-config-data\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.649015 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.751060 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1923fb20-c577-41fe-928e-011100637dbe-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.751117 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.751154 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-logs\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.751280 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhv52\" (UniqueName: \"kubernetes.io/projected/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-kube-api-access-vhv52\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.751357 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1923fb20-c577-41fe-928e-011100637dbe-config-data\") pod \"nova-api-0\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.751394 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-config-data\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc 
kubenswrapper[4932]: I1125 10:28:15.751452 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1923fb20-c577-41fe-928e-011100637dbe-logs\") pod \"nova-api-0\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.751544 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vt9fp\" (UniqueName: \"kubernetes.io/projected/1923fb20-c577-41fe-928e-011100637dbe-kube-api-access-vt9fp\") pod \"nova-api-0\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.751608 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.751794 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-logs\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.755548 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.755812 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.756776 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-config-data\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.772760 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhv52\" (UniqueName: \"kubernetes.io/projected/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-kube-api-access-vhv52\") pod \"nova-metadata-0\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.828351 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.853897 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1923fb20-c577-41fe-928e-011100637dbe-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.853995 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/1923fb20-c577-41fe-928e-011100637dbe-config-data\") pod \"nova-api-0\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.854040 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1923fb20-c577-41fe-928e-011100637dbe-logs\") pod \"nova-api-0\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.854089 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vt9fp\" (UniqueName: \"kubernetes.io/projected/1923fb20-c577-41fe-928e-011100637dbe-kube-api-access-vt9fp\") pod \"nova-api-0\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.854583 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1923fb20-c577-41fe-928e-011100637dbe-logs\") pod \"nova-api-0\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.857856 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1923fb20-c577-41fe-928e-011100637dbe-config-data\") pod \"nova-api-0\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.858637 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1923fb20-c577-41fe-928e-011100637dbe-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.883038 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vt9fp\" (UniqueName: \"kubernetes.io/projected/1923fb20-c577-41fe-928e-011100637dbe-kube-api-access-vt9fp\") pod \"nova-api-0\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " pod="openstack/nova-api-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.888505 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59666c9775-qp4j2"] Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.888825 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59666c9775-qp4j2" podUID="b63de457-6eac-4a4d-b603-86232313867c" containerName="dnsmasq-dns" containerID="cri-o://94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d" gracePeriod=10 Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.927210 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:28:15 crc kubenswrapper[4932]: I1125 10:28:15.952855 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.434700 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.483109 4932 generic.go:334] "Generic (PLEG): container finished" podID="b63de457-6eac-4a4d-b603-86232313867c" containerID="94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d" exitCode=0 Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.483866 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59666c9775-qp4j2" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.484334 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59666c9775-qp4j2" event={"ID":"b63de457-6eac-4a4d-b603-86232313867c","Type":"ContainerDied","Data":"94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d"} Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.484435 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59666c9775-qp4j2" event={"ID":"b63de457-6eac-4a4d-b603-86232313867c","Type":"ContainerDied","Data":"62a7b51e613d8173b296bfc77f38f7f3a729993bd449101363b5711a86f267f7"} Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.484454 4932 scope.go:117] "RemoveContainer" containerID="94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.509609 4932 scope.go:117] "RemoveContainer" containerID="6db83b93874e910ef50f7f8d8a20472a8f6847edff1d2bfc094cacad2c28c973" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.509629 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:16 crc kubenswrapper[4932]: W1125 10:28:16.518248 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b4e8689_c4b5_41d2_a3f6_83ee9eca0e2d.slice/crio-d663614c8e3aeaa7e73b0494f1bec402c2ae26830372bc6f0e9574314f44993a WatchSource:0}: Error finding container d663614c8e3aeaa7e73b0494f1bec402c2ae26830372bc6f0e9574314f44993a: Status 404 returned error can't find the container with id d663614c8e3aeaa7e73b0494f1bec402c2ae26830372bc6f0e9574314f44993a Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.529897 4932 scope.go:117] "RemoveContainer" containerID="94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d" Nov 25 10:28:16 crc kubenswrapper[4932]: E1125 10:28:16.531570 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d\": container with ID starting with 94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d not found: ID does not exist" containerID="94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.531601 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d"} err="failed to get container status \"94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d\": rpc error: code = NotFound desc = could not find container \"94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d\": container with ID starting with 94544bc19163bd5a637f007be40763de7ab8a94e547b22fedc80d12a15e05e7d not found: ID does not exist" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.531639 4932 scope.go:117] "RemoveContainer" 
containerID="6db83b93874e910ef50f7f8d8a20472a8f6847edff1d2bfc094cacad2c28c973" Nov 25 10:28:16 crc kubenswrapper[4932]: E1125 10:28:16.532943 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6db83b93874e910ef50f7f8d8a20472a8f6847edff1d2bfc094cacad2c28c973\": container with ID starting with 6db83b93874e910ef50f7f8d8a20472a8f6847edff1d2bfc094cacad2c28c973 not found: ID does not exist" containerID="6db83b93874e910ef50f7f8d8a20472a8f6847edff1d2bfc094cacad2c28c973" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.532991 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6db83b93874e910ef50f7f8d8a20472a8f6847edff1d2bfc094cacad2c28c973"} err="failed to get container status \"6db83b93874e910ef50f7f8d8a20472a8f6847edff1d2bfc094cacad2c28c973\": rpc error: code = NotFound desc = could not find container \"6db83b93874e910ef50f7f8d8a20472a8f6847edff1d2bfc094cacad2c28c973\": container with ID starting with 6db83b93874e910ef50f7f8d8a20472a8f6847edff1d2bfc094cacad2c28c973 not found: ID does not exist" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.556997 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:28:16 crc kubenswrapper[4932]: W1125 10:28:16.566588 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1923fb20_c577_41fe_928e_011100637dbe.slice/crio-1ebc64c6e6699ce23fd8c4b63d68f0dbb9084b7e2fd59ffb4f9d7f26da0f5d1b WatchSource:0}: Error finding container 1ebc64c6e6699ce23fd8c4b63d68f0dbb9084b7e2fd59ffb4f9d7f26da0f5d1b: Status 404 returned error can't find the container with id 1ebc64c6e6699ce23fd8c4b63d68f0dbb9084b7e2fd59ffb4f9d7f26da0f5d1b Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.566967 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-config\") pod \"b63de457-6eac-4a4d-b603-86232313867c\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.567147 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dm6sg\" (UniqueName: \"kubernetes.io/projected/b63de457-6eac-4a4d-b603-86232313867c-kube-api-access-dm6sg\") pod \"b63de457-6eac-4a4d-b603-86232313867c\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.567312 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-ovsdbserver-sb\") pod \"b63de457-6eac-4a4d-b603-86232313867c\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.567572 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-ovsdbserver-nb\") pod \"b63de457-6eac-4a4d-b603-86232313867c\" (UID: \"b63de457-6eac-4a4d-b603-86232313867c\") " Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.567981 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-dns-svc\") pod \"b63de457-6eac-4a4d-b603-86232313867c\" (UID: 
\"b63de457-6eac-4a4d-b603-86232313867c\") " Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.572366 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b63de457-6eac-4a4d-b603-86232313867c-kube-api-access-dm6sg" (OuterVolumeSpecName: "kube-api-access-dm6sg") pod "b63de457-6eac-4a4d-b603-86232313867c" (UID: "b63de457-6eac-4a4d-b603-86232313867c"). InnerVolumeSpecName "kube-api-access-dm6sg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.617212 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b63de457-6eac-4a4d-b603-86232313867c" (UID: "b63de457-6eac-4a4d-b603-86232313867c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.619623 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="859a89a7-6a7f-4699-a2e4-db62b1585b92" path="/var/lib/kubelet/pods/859a89a7-6a7f-4699-a2e4-db62b1585b92/volumes" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.620353 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd35a8fd-f978-4863-98a7-9481c7ce10c9" path="/var/lib/kubelet/pods/fd35a8fd-f978-4863-98a7-9481c7ce10c9/volumes" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.623151 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b63de457-6eac-4a4d-b603-86232313867c" (UID: "b63de457-6eac-4a4d-b603-86232313867c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.626687 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b63de457-6eac-4a4d-b603-86232313867c" (UID: "b63de457-6eac-4a4d-b603-86232313867c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.642932 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-config" (OuterVolumeSpecName: "config") pod "b63de457-6eac-4a4d-b603-86232313867c" (UID: "b63de457-6eac-4a4d-b603-86232313867c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.670412 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.670855 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.670879 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dm6sg\" (UniqueName: \"kubernetes.io/projected/b63de457-6eac-4a4d-b603-86232313867c-kube-api-access-dm6sg\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.670894 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.670903 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63de457-6eac-4a4d-b603-86232313867c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.803274 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.922498 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59666c9775-qp4j2"] Nov 25 10:28:16 crc kubenswrapper[4932]: I1125 10:28:16.930349 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59666c9775-qp4j2"] Nov 25 10:28:17 crc kubenswrapper[4932]: I1125 10:28:17.493342 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1923fb20-c577-41fe-928e-011100637dbe","Type":"ContainerStarted","Data":"269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346"} Nov 25 10:28:17 crc kubenswrapper[4932]: I1125 10:28:17.494626 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1923fb20-c577-41fe-928e-011100637dbe","Type":"ContainerStarted","Data":"d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f"} Nov 25 10:28:17 crc kubenswrapper[4932]: I1125 10:28:17.494672 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1923fb20-c577-41fe-928e-011100637dbe","Type":"ContainerStarted","Data":"1ebc64c6e6699ce23fd8c4b63d68f0dbb9084b7e2fd59ffb4f9d7f26da0f5d1b"} Nov 25 10:28:17 crc kubenswrapper[4932]: I1125 10:28:17.496664 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d","Type":"ContainerStarted","Data":"049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350"} Nov 25 10:28:17 crc kubenswrapper[4932]: I1125 10:28:17.496711 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d","Type":"ContainerStarted","Data":"48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc"} Nov 25 10:28:17 crc kubenswrapper[4932]: I1125 10:28:17.496724 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d","Type":"ContainerStarted","Data":"d663614c8e3aeaa7e73b0494f1bec402c2ae26830372bc6f0e9574314f44993a"} Nov 25 10:28:17 crc kubenswrapper[4932]: I1125 10:28:17.510095 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.510076116 podStartE2EDuration="2.510076116s" podCreationTimestamp="2025-11-25 10:28:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:17.508026428 +0000 UTC m=+5957.634056021" watchObservedRunningTime="2025-11-25 10:28:17.510076116 +0000 UTC m=+5957.636105679" Nov 25 10:28:17 crc kubenswrapper[4932]: I1125 10:28:17.531942 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.5319194019999998 podStartE2EDuration="2.531919402s" podCreationTimestamp="2025-11-25 10:28:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:17.52869085 +0000 UTC m=+5957.654720433" watchObservedRunningTime="2025-11-25 10:28:17.531919402 +0000 UTC m=+5957.657948975" Nov 25 10:28:18 crc kubenswrapper[4932]: I1125 10:28:18.622784 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b63de457-6eac-4a4d-b603-86232313867c" path="/var/lib/kubelet/pods/b63de457-6eac-4a4d-b603-86232313867c/volumes" Nov 25 10:28:20 crc kubenswrapper[4932]: I1125 10:28:20.928422 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 10:28:20 crc kubenswrapper[4932]: I1125 10:28:20.928879 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 10:28:21 crc kubenswrapper[4932]: I1125 10:28:21.802741 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:21 crc kubenswrapper[4932]: I1125 10:28:21.823012 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:22 crc kubenswrapper[4932]: I1125 10:28:22.552472 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 25 10:28:23 crc kubenswrapper[4932]: I1125 10:28:23.829632 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.266859 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-97sj6"] Nov 25 10:28:24 crc kubenswrapper[4932]: E1125 10:28:24.267364 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b63de457-6eac-4a4d-b603-86232313867c" containerName="dnsmasq-dns" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.267389 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b63de457-6eac-4a4d-b603-86232313867c" containerName="dnsmasq-dns" Nov 25 10:28:24 crc kubenswrapper[4932]: E1125 10:28:24.267415 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b63de457-6eac-4a4d-b603-86232313867c" containerName="init" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.267423 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b63de457-6eac-4a4d-b603-86232313867c" containerName="init" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.267636 4932 
memory_manager.go:354] "RemoveStaleState removing state" podUID="b63de457-6eac-4a4d-b603-86232313867c" containerName="dnsmasq-dns" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.268491 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.270303 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.270627 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.276369 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-97sj6"] Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.321592 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-scripts\") pod \"nova-cell1-cell-mapping-97sj6\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.321982 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-config-data\") pod \"nova-cell1-cell-mapping-97sj6\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.322090 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-97sj6\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.322123 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmg8n\" (UniqueName: \"kubernetes.io/projected/c0b67df6-dcee-42ea-a48c-88a17f961dda-kube-api-access-lmg8n\") pod \"nova-cell1-cell-mapping-97sj6\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.424347 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-config-data\") pod \"nova-cell1-cell-mapping-97sj6\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.424432 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-97sj6\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.424466 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmg8n\" (UniqueName: \"kubernetes.io/projected/c0b67df6-dcee-42ea-a48c-88a17f961dda-kube-api-access-lmg8n\") pod \"nova-cell1-cell-mapping-97sj6\" (UID: 
\"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.424529 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-scripts\") pod \"nova-cell1-cell-mapping-97sj6\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.430608 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-97sj6\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.431177 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-scripts\") pod \"nova-cell1-cell-mapping-97sj6\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.439694 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-config-data\") pod \"nova-cell1-cell-mapping-97sj6\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.442070 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmg8n\" (UniqueName: \"kubernetes.io/projected/c0b67df6-dcee-42ea-a48c-88a17f961dda-kube-api-access-lmg8n\") pod \"nova-cell1-cell-mapping-97sj6\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:24 crc kubenswrapper[4932]: I1125 10:28:24.589079 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:25 crc kubenswrapper[4932]: I1125 10:28:25.049276 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-97sj6"] Nov 25 10:28:25 crc kubenswrapper[4932]: W1125 10:28:25.052934 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc0b67df6_dcee_42ea_a48c_88a17f961dda.slice/crio-f9beb2b8736e60bffc5e26c95f7577039efd9aeac8262e13eeca1a54a4da5af3 WatchSource:0}: Error finding container f9beb2b8736e60bffc5e26c95f7577039efd9aeac8262e13eeca1a54a4da5af3: Status 404 returned error can't find the container with id f9beb2b8736e60bffc5e26c95f7577039efd9aeac8262e13eeca1a54a4da5af3 Nov 25 10:28:25 crc kubenswrapper[4932]: I1125 10:28:25.561416 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-97sj6" event={"ID":"c0b67df6-dcee-42ea-a48c-88a17f961dda","Type":"ContainerStarted","Data":"2219ca4cd60852dcc5bf4c82697b810c23526082cb017dd46454887bdb3efafe"} Nov 25 10:28:25 crc kubenswrapper[4932]: I1125 10:28:25.561756 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-97sj6" event={"ID":"c0b67df6-dcee-42ea-a48c-88a17f961dda","Type":"ContainerStarted","Data":"f9beb2b8736e60bffc5e26c95f7577039efd9aeac8262e13eeca1a54a4da5af3"} Nov 25 10:28:25 crc kubenswrapper[4932]: I1125 10:28:25.601743 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-97sj6" podStartSLOduration=1.601728477 podStartE2EDuration="1.601728477s" podCreationTimestamp="2025-11-25 10:28:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:25.600551553 +0000 UTC m=+5965.726581116" watchObservedRunningTime="2025-11-25 10:28:25.601728477 +0000 UTC m=+5965.727758040" Nov 25 10:28:25 crc kubenswrapper[4932]: I1125 10:28:25.605671 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:28:25 crc kubenswrapper[4932]: E1125 10:28:25.605956 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:28:25 crc kubenswrapper[4932]: I1125 10:28:25.927583 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 10:28:25 crc kubenswrapper[4932]: I1125 10:28:25.927645 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 10:28:25 crc kubenswrapper[4932]: I1125 10:28:25.955646 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 10:28:25 crc kubenswrapper[4932]: I1125 10:28:25.955699 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 10:28:26 crc kubenswrapper[4932]: I1125 10:28:26.940397 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" 
containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.94:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 10:28:26 crc kubenswrapper[4932]: I1125 10:28:26.941227 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.94:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 10:28:27 crc kubenswrapper[4932]: I1125 10:28:27.037436 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1923fb20-c577-41fe-928e-011100637dbe" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.95:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:28:27 crc kubenswrapper[4932]: I1125 10:28:27.037558 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="1923fb20-c577-41fe-928e-011100637dbe" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.95:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:28:30 crc kubenswrapper[4932]: I1125 10:28:30.621958 4932 generic.go:334] "Generic (PLEG): container finished" podID="c0b67df6-dcee-42ea-a48c-88a17f961dda" containerID="2219ca4cd60852dcc5bf4c82697b810c23526082cb017dd46454887bdb3efafe" exitCode=0 Nov 25 10:28:30 crc kubenswrapper[4932]: I1125 10:28:30.622008 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-97sj6" event={"ID":"c0b67df6-dcee-42ea-a48c-88a17f961dda","Type":"ContainerDied","Data":"2219ca4cd60852dcc5bf4c82697b810c23526082cb017dd46454887bdb3efafe"} Nov 25 10:28:31 crc kubenswrapper[4932]: I1125 10:28:31.941056 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:31 crc kubenswrapper[4932]: I1125 10:28:31.980535 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lmg8n\" (UniqueName: \"kubernetes.io/projected/c0b67df6-dcee-42ea-a48c-88a17f961dda-kube-api-access-lmg8n\") pod \"c0b67df6-dcee-42ea-a48c-88a17f961dda\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " Nov 25 10:28:31 crc kubenswrapper[4932]: I1125 10:28:31.980766 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-config-data\") pod \"c0b67df6-dcee-42ea-a48c-88a17f961dda\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " Nov 25 10:28:31 crc kubenswrapper[4932]: I1125 10:28:31.980942 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-combined-ca-bundle\") pod \"c0b67df6-dcee-42ea-a48c-88a17f961dda\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " Nov 25 10:28:31 crc kubenswrapper[4932]: I1125 10:28:31.980999 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-scripts\") pod \"c0b67df6-dcee-42ea-a48c-88a17f961dda\" (UID: \"c0b67df6-dcee-42ea-a48c-88a17f961dda\") " Nov 25 10:28:31 crc kubenswrapper[4932]: I1125 10:28:31.988524 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-scripts" (OuterVolumeSpecName: "scripts") pod "c0b67df6-dcee-42ea-a48c-88a17f961dda" (UID: "c0b67df6-dcee-42ea-a48c-88a17f961dda"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:31 crc kubenswrapper[4932]: I1125 10:28:31.989881 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0b67df6-dcee-42ea-a48c-88a17f961dda-kube-api-access-lmg8n" (OuterVolumeSpecName: "kube-api-access-lmg8n") pod "c0b67df6-dcee-42ea-a48c-88a17f961dda" (UID: "c0b67df6-dcee-42ea-a48c-88a17f961dda"). InnerVolumeSpecName "kube-api-access-lmg8n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.013264 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0b67df6-dcee-42ea-a48c-88a17f961dda" (UID: "c0b67df6-dcee-42ea-a48c-88a17f961dda"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.015095 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-config-data" (OuterVolumeSpecName: "config-data") pod "c0b67df6-dcee-42ea-a48c-88a17f961dda" (UID: "c0b67df6-dcee-42ea-a48c-88a17f961dda"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.083243 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.083296 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.083314 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lmg8n\" (UniqueName: \"kubernetes.io/projected/c0b67df6-dcee-42ea-a48c-88a17f961dda-kube-api-access-lmg8n\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.083326 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0b67df6-dcee-42ea-a48c-88a17f961dda-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.640928 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-97sj6" event={"ID":"c0b67df6-dcee-42ea-a48c-88a17f961dda","Type":"ContainerDied","Data":"f9beb2b8736e60bffc5e26c95f7577039efd9aeac8262e13eeca1a54a4da5af3"} Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.640975 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9beb2b8736e60bffc5e26c95f7577039efd9aeac8262e13eeca1a54a4da5af3" Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.641003 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-97sj6" Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.823496 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.824019 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1923fb20-c577-41fe-928e-011100637dbe" containerName="nova-api-log" containerID="cri-o://d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f" gracePeriod=30 Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.824092 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="1923fb20-c577-41fe-928e-011100637dbe" containerName="nova-api-api" containerID="cri-o://269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346" gracePeriod=30 Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.894384 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.894665 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" containerName="nova-metadata-log" containerID="cri-o://48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc" gracePeriod=30 Nov 25 10:28:32 crc kubenswrapper[4932]: I1125 10:28:32.894805 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" containerName="nova-metadata-metadata" containerID="cri-o://049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350" gracePeriod=30 Nov 25 10:28:33 crc 
kubenswrapper[4932]: I1125 10:28:33.655533 4932 generic.go:334] "Generic (PLEG): container finished" podID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" containerID="48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc" exitCode=143 Nov 25 10:28:33 crc kubenswrapper[4932]: I1125 10:28:33.655616 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d","Type":"ContainerDied","Data":"48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc"} Nov 25 10:28:33 crc kubenswrapper[4932]: I1125 10:28:33.658768 4932 generic.go:334] "Generic (PLEG): container finished" podID="1923fb20-c577-41fe-928e-011100637dbe" containerID="d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f" exitCode=143 Nov 25 10:28:33 crc kubenswrapper[4932]: I1125 10:28:33.658800 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1923fb20-c577-41fe-928e-011100637dbe","Type":"ContainerDied","Data":"d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f"} Nov 25 10:28:37 crc kubenswrapper[4932]: I1125 10:28:37.606480 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:28:37 crc kubenswrapper[4932]: E1125 10:28:37.607470 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:28:44 crc kubenswrapper[4932]: I1125 10:28:44.766786 4932 generic.go:334] "Generic (PLEG): container finished" podID="3a9aa1a5-880d-442a-bf77-e290e9acbe80" containerID="bb69ce3c7fbc919fbbb7fe2d6b0f77ce532a2c3e60d281184a27c6a26624ccef" exitCode=137 Nov 25 10:28:44 crc kubenswrapper[4932]: I1125 10:28:44.766870 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3a9aa1a5-880d-442a-bf77-e290e9acbe80","Type":"ContainerDied","Data":"bb69ce3c7fbc919fbbb7fe2d6b0f77ce532a2c3e60d281184a27c6a26624ccef"} Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.022075 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.142510 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a9aa1a5-880d-442a-bf77-e290e9acbe80-combined-ca-bundle\") pod \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\" (UID: \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\") " Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.142657 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jh47v\" (UniqueName: \"kubernetes.io/projected/3a9aa1a5-880d-442a-bf77-e290e9acbe80-kube-api-access-jh47v\") pod \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\" (UID: \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\") " Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.142712 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a9aa1a5-880d-442a-bf77-e290e9acbe80-config-data\") pod \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\" (UID: \"3a9aa1a5-880d-442a-bf77-e290e9acbe80\") " Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.148971 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a9aa1a5-880d-442a-bf77-e290e9acbe80-kube-api-access-jh47v" (OuterVolumeSpecName: "kube-api-access-jh47v") pod "3a9aa1a5-880d-442a-bf77-e290e9acbe80" (UID: "3a9aa1a5-880d-442a-bf77-e290e9acbe80"). InnerVolumeSpecName "kube-api-access-jh47v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.171500 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a9aa1a5-880d-442a-bf77-e290e9acbe80-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3a9aa1a5-880d-442a-bf77-e290e9acbe80" (UID: "3a9aa1a5-880d-442a-bf77-e290e9acbe80"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.171684 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a9aa1a5-880d-442a-bf77-e290e9acbe80-config-data" (OuterVolumeSpecName: "config-data") pod "3a9aa1a5-880d-442a-bf77-e290e9acbe80" (UID: "3a9aa1a5-880d-442a-bf77-e290e9acbe80"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.245349 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jh47v\" (UniqueName: \"kubernetes.io/projected/3a9aa1a5-880d-442a-bf77-e290e9acbe80-kube-api-access-jh47v\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.245382 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a9aa1a5-880d-442a-bf77-e290e9acbe80-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.245392 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a9aa1a5-880d-442a-bf77-e290e9acbe80-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.775909 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3a9aa1a5-880d-442a-bf77-e290e9acbe80","Type":"ContainerDied","Data":"d845741920080002d0223cfaccce9ef639895c6ae24be8f1c7310db9c97c4bc3"} Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.775940 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.775958 4932 scope.go:117] "RemoveContainer" containerID="bb69ce3c7fbc919fbbb7fe2d6b0f77ce532a2c3e60d281184a27c6a26624ccef" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.810592 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.819651 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.835133 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:28:45 crc kubenswrapper[4932]: E1125 10:28:45.835840 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a9aa1a5-880d-442a-bf77-e290e9acbe80" containerName="nova-scheduler-scheduler" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.835862 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a9aa1a5-880d-442a-bf77-e290e9acbe80" containerName="nova-scheduler-scheduler" Nov 25 10:28:45 crc kubenswrapper[4932]: E1125 10:28:45.835881 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0b67df6-dcee-42ea-a48c-88a17f961dda" containerName="nova-manage" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.835890 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0b67df6-dcee-42ea-a48c-88a17f961dda" containerName="nova-manage" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.836157 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0b67df6-dcee-42ea-a48c-88a17f961dda" containerName="nova-manage" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.836183 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a9aa1a5-880d-442a-bf77-e290e9acbe80" containerName="nova-scheduler-scheduler" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.837124 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.839512 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.850994 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.953866 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.954155 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.958345 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s25mz\" (UniqueName: \"kubernetes.io/projected/a59eec03-6f3d-4392-b0c3-0c4aa76f30f3-kube-api-access-s25mz\") pod \"nova-scheduler-0\" (UID: \"a59eec03-6f3d-4392-b0c3-0c4aa76f30f3\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.958459 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a59eec03-6f3d-4392-b0c3-0c4aa76f30f3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a59eec03-6f3d-4392-b0c3-0c4aa76f30f3\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:45 crc kubenswrapper[4932]: I1125 10:28:45.958594 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a59eec03-6f3d-4392-b0c3-0c4aa76f30f3-config-data\") pod \"nova-scheduler-0\" (UID: \"a59eec03-6f3d-4392-b0c3-0c4aa76f30f3\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.060518 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s25mz\" (UniqueName: \"kubernetes.io/projected/a59eec03-6f3d-4392-b0c3-0c4aa76f30f3-kube-api-access-s25mz\") pod \"nova-scheduler-0\" (UID: \"a59eec03-6f3d-4392-b0c3-0c4aa76f30f3\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.060639 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a59eec03-6f3d-4392-b0c3-0c4aa76f30f3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a59eec03-6f3d-4392-b0c3-0c4aa76f30f3\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.060680 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a59eec03-6f3d-4392-b0c3-0c4aa76f30f3-config-data\") pod \"nova-scheduler-0\" (UID: \"a59eec03-6f3d-4392-b0c3-0c4aa76f30f3\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.064315 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a59eec03-6f3d-4392-b0c3-0c4aa76f30f3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a59eec03-6f3d-4392-b0c3-0c4aa76f30f3\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.064591 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/a59eec03-6f3d-4392-b0c3-0c4aa76f30f3-config-data\") pod \"nova-scheduler-0\" (UID: \"a59eec03-6f3d-4392-b0c3-0c4aa76f30f3\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.081253 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s25mz\" (UniqueName: \"kubernetes.io/projected/a59eec03-6f3d-4392-b0c3-0c4aa76f30f3-kube-api-access-s25mz\") pod \"nova-scheduler-0\" (UID: \"a59eec03-6f3d-4392-b0c3-0c4aa76f30f3\") " pod="openstack/nova-scheduler-0" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.153966 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.591513 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.618007 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a9aa1a5-880d-442a-bf77-e290e9acbe80" path="/var/lib/kubelet/pods/3a9aa1a5-880d-442a-bf77-e290e9acbe80/volumes" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.638570 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.676180 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1923fb20-c577-41fe-928e-011100637dbe-logs\") pod \"1923fb20-c577-41fe-928e-011100637dbe\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.676260 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1923fb20-c577-41fe-928e-011100637dbe-combined-ca-bundle\") pod \"1923fb20-c577-41fe-928e-011100637dbe\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.676399 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt9fp\" (UniqueName: \"kubernetes.io/projected/1923fb20-c577-41fe-928e-011100637dbe-kube-api-access-vt9fp\") pod \"1923fb20-c577-41fe-928e-011100637dbe\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.676522 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1923fb20-c577-41fe-928e-011100637dbe-config-data\") pod \"1923fb20-c577-41fe-928e-011100637dbe\" (UID: \"1923fb20-c577-41fe-928e-011100637dbe\") " Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.676781 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1923fb20-c577-41fe-928e-011100637dbe-logs" (OuterVolumeSpecName: "logs") pod "1923fb20-c577-41fe-928e-011100637dbe" (UID: "1923fb20-c577-41fe-928e-011100637dbe"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.677019 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1923fb20-c577-41fe-928e-011100637dbe-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.680170 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1923fb20-c577-41fe-928e-011100637dbe-kube-api-access-vt9fp" (OuterVolumeSpecName: "kube-api-access-vt9fp") pod "1923fb20-c577-41fe-928e-011100637dbe" (UID: "1923fb20-c577-41fe-928e-011100637dbe"). InnerVolumeSpecName "kube-api-access-vt9fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.701613 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1923fb20-c577-41fe-928e-011100637dbe-config-data" (OuterVolumeSpecName: "config-data") pod "1923fb20-c577-41fe-928e-011100637dbe" (UID: "1923fb20-c577-41fe-928e-011100637dbe"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.703022 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1923fb20-c577-41fe-928e-011100637dbe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1923fb20-c577-41fe-928e-011100637dbe" (UID: "1923fb20-c577-41fe-928e-011100637dbe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.734546 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.778594 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-combined-ca-bundle\") pod \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.778691 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-config-data\") pod \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.778757 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-logs\") pod \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.778786 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-nova-metadata-tls-certs\") pod \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\" (UID: \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.778809 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhv52\" (UniqueName: \"kubernetes.io/projected/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-kube-api-access-vhv52\") pod \"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\" (UID: 
\"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d\") " Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.779867 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt9fp\" (UniqueName: \"kubernetes.io/projected/1923fb20-c577-41fe-928e-011100637dbe-kube-api-access-vt9fp\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.779889 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1923fb20-c577-41fe-928e-011100637dbe-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.779899 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1923fb20-c577-41fe-928e-011100637dbe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.780206 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-logs" (OuterVolumeSpecName: "logs") pod "4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" (UID: "4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.784751 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-kube-api-access-vhv52" (OuterVolumeSpecName: "kube-api-access-vhv52") pod "4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" (UID: "4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d"). InnerVolumeSpecName "kube-api-access-vhv52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.790179 4932 generic.go:334] "Generic (PLEG): container finished" podID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" containerID="049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350" exitCode=0 Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.790272 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d","Type":"ContainerDied","Data":"049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350"} Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.790310 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d","Type":"ContainerDied","Data":"d663614c8e3aeaa7e73b0494f1bec402c2ae26830372bc6f0e9574314f44993a"} Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.790331 4932 scope.go:117] "RemoveContainer" containerID="049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350" Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.790334 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.794807 4932 generic.go:334] "Generic (PLEG): container finished" podID="1923fb20-c577-41fe-928e-011100637dbe" containerID="269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346" exitCode=0
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.794970 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1923fb20-c577-41fe-928e-011100637dbe","Type":"ContainerDied","Data":"269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346"}
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.795055 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"1923fb20-c577-41fe-928e-011100637dbe","Type":"ContainerDied","Data":"1ebc64c6e6699ce23fd8c4b63d68f0dbb9084b7e2fd59ffb4f9d7f26da0f5d1b"}
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.795157 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.798089 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a59eec03-6f3d-4392-b0c3-0c4aa76f30f3","Type":"ContainerStarted","Data":"2315e213f3e395ea50eee2d53e44e143b2337799ec9c63d40b6d9c9cf73ce47c"}
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.817741 4932 scope.go:117] "RemoveContainer" containerID="48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.839193 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.846314 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" (UID: "4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.847529 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-config-data" (OuterVolumeSpecName: "config-data") pod "4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" (UID: "4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.858506 4932 scope.go:117] "RemoveContainer" containerID="049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350"
Nov 25 10:28:46 crc kubenswrapper[4932]: E1125 10:28:46.859082 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350\": container with ID starting with 049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350 not found: ID does not exist" containerID="049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.859117 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350"} err="failed to get container status \"049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350\": rpc error: code = NotFound desc = could not find container \"049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350\": container with ID starting with 049e3d421e6e05352998511de9e0aeccd3880a48afdc6651990fba77f95c6350 not found: ID does not exist"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.859143 4932 scope.go:117] "RemoveContainer" containerID="48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc"
Nov 25 10:28:46 crc kubenswrapper[4932]: E1125 10:28:46.859450 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc\": container with ID starting with 48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc not found: ID does not exist" containerID="48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.859473 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc"} err="failed to get container status \"48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc\": rpc error: code = NotFound desc = could not find container \"48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc\": container with ID starting with 48ae868e5a6824fd891514348522142c246f8bc772d2cfb30888622bfc9bffdc not found: ID does not exist"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.859490 4932 scope.go:117] "RemoveContainer" containerID="269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.861976 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.871760 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:28:46 crc kubenswrapper[4932]: E1125 10:28:46.872261 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1923fb20-c577-41fe-928e-011100637dbe" containerName="nova-api-api"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.872277 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1923fb20-c577-41fe-928e-011100637dbe" containerName="nova-api-api"
Nov 25 10:28:46 crc kubenswrapper[4932]: E1125 10:28:46.872306 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" containerName="nova-metadata-log"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.872313 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" containerName="nova-metadata-log"
Nov 25 10:28:46 crc kubenswrapper[4932]: E1125 10:28:46.872320 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" containerName="nova-metadata-metadata"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.872326 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" containerName="nova-metadata-metadata"
Nov 25 10:28:46 crc kubenswrapper[4932]: E1125 10:28:46.872350 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1923fb20-c577-41fe-928e-011100637dbe" containerName="nova-api-log"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.872355 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1923fb20-c577-41fe-928e-011100637dbe" containerName="nova-api-log"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.872514 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" containerName="nova-metadata-metadata"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.872534 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="1923fb20-c577-41fe-928e-011100637dbe" containerName="nova-api-api"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.872548 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="1923fb20-c577-41fe-928e-011100637dbe" containerName="nova-api-log"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.872562 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" containerName="nova-metadata-log"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.873707 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.876780 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.878063 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" (UID: "4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.881148 4932 scope.go:117] "RemoveContainer" containerID="d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.881383 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh677\" (UniqueName: \"kubernetes.io/projected/74038ee8-ffe2-4807-b350-96e607d7d254-kube-api-access-sh677\") pod \"nova-api-0\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") " pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.881570 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74038ee8-ffe2-4807-b350-96e607d7d254-config-data\") pod \"nova-api-0\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") " pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.881666 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74038ee8-ffe2-4807-b350-96e607d7d254-logs\") pod \"nova-api-0\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") " pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.881953 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74038ee8-ffe2-4807-b350-96e607d7d254-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") " pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.882099 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.882193 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.882306 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-logs\") on node \"crc\" DevicePath \"\""
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.882493 4932 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.883019 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhv52\" (UniqueName: \"kubernetes.io/projected/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d-kube-api-access-vhv52\") on node \"crc\" DevicePath \"\""
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.885280 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.905126 4932 scope.go:117] "RemoveContainer" containerID="269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346"
Nov 25 10:28:46 crc kubenswrapper[4932]: E1125 10:28:46.905807 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346\": container with ID starting with 269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346 not found: ID does not exist" containerID="269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.905862 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346"} err="failed to get container status \"269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346\": rpc error: code = NotFound desc = could not find container \"269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346\": container with ID starting with 269042021fd58bd4a99738ecf0b54f89e2ed77ac79e3aec27f3cdf7e3de4d346 not found: ID does not exist"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.905896 4932 scope.go:117] "RemoveContainer" containerID="d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f"
Nov 25 10:28:46 crc kubenswrapper[4932]: E1125 10:28:46.906299 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f\": container with ID starting with d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f not found: ID does not exist" containerID="d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.906326 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f"} err="failed to get container status \"d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f\": rpc error: code = NotFound desc = could not find container \"d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f\": container with ID starting with d8869674498bc0737731911fc7295c80033dd666197db8673f6d9d3e9b9b5a9f not found: ID does not exist"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.984255 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74038ee8-ffe2-4807-b350-96e607d7d254-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") " pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.984357 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sh677\" (UniqueName: \"kubernetes.io/projected/74038ee8-ffe2-4807-b350-96e607d7d254-kube-api-access-sh677\") pod \"nova-api-0\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") " pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.984400 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74038ee8-ffe2-4807-b350-96e607d7d254-config-data\") pod \"nova-api-0\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") " pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.984437 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74038ee8-ffe2-4807-b350-96e607d7d254-logs\") pod \"nova-api-0\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") " pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.984910 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74038ee8-ffe2-4807-b350-96e607d7d254-logs\") pod \"nova-api-0\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") " pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.989502 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74038ee8-ffe2-4807-b350-96e607d7d254-config-data\") pod \"nova-api-0\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") " pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.990421 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74038ee8-ffe2-4807-b350-96e607d7d254-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") " pod="openstack/nova-api-0"
Nov 25 10:28:46 crc kubenswrapper[4932]: I1125 10:28:46.999006 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sh677\" (UniqueName: \"kubernetes.io/projected/74038ee8-ffe2-4807-b350-96e607d7d254-kube-api-access-sh677\") pod \"nova-api-0\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") " pod="openstack/nova-api-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.196962 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.202053 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.223251 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.241400 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.243356 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.246846 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.247209 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.277242 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.296617 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0731f239-1be6-4361-b7b7-aa879f10bb5f-logs\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.296761 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0731f239-1be6-4361-b7b7-aa879f10bb5f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.297132 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0731f239-1be6-4361-b7b7-aa879f10bb5f-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.297170 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0731f239-1be6-4361-b7b7-aa879f10bb5f-config-data\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.297238 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jwn4\" (UniqueName: \"kubernetes.io/projected/0731f239-1be6-4361-b7b7-aa879f10bb5f-kube-api-access-5jwn4\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.399519 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0731f239-1be6-4361-b7b7-aa879f10bb5f-logs\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.399882 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0731f239-1be6-4361-b7b7-aa879f10bb5f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.399935 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0731f239-1be6-4361-b7b7-aa879f10bb5f-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.399946 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0731f239-1be6-4361-b7b7-aa879f10bb5f-logs\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.399965 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0731f239-1be6-4361-b7b7-aa879f10bb5f-config-data\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.400036 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jwn4\" (UniqueName: \"kubernetes.io/projected/0731f239-1be6-4361-b7b7-aa879f10bb5f-kube-api-access-5jwn4\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.406307 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0731f239-1be6-4361-b7b7-aa879f10bb5f-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.409726 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0731f239-1be6-4361-b7b7-aa879f10bb5f-config-data\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.410628 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0731f239-1be6-4361-b7b7-aa879f10bb5f-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.428584 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jwn4\" (UniqueName: \"kubernetes.io/projected/0731f239-1be6-4361-b7b7-aa879f10bb5f-kube-api-access-5jwn4\") pod \"nova-metadata-0\" (UID: \"0731f239-1be6-4361-b7b7-aa879f10bb5f\") " pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.635745 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.661554 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:28:47 crc kubenswrapper[4932]: W1125 10:28:47.678775 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74038ee8_ffe2_4807_b350_96e607d7d254.slice/crio-999243399b284720da044a1b5c9159624f6d96c29106c538a645ae12ba30038b WatchSource:0}: Error finding container 999243399b284720da044a1b5c9159624f6d96c29106c538a645ae12ba30038b: Status 404 returned error can't find the container with id 999243399b284720da044a1b5c9159624f6d96c29106c538a645ae12ba30038b
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.816996 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a59eec03-6f3d-4392-b0c3-0c4aa76f30f3","Type":"ContainerStarted","Data":"daf54ad5532b2ead79f8d581bd2b4d2dcf6c5a00ee43db91fafb04b665a7196a"}
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.825633 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"74038ee8-ffe2-4807-b350-96e607d7d254","Type":"ContainerStarted","Data":"999243399b284720da044a1b5c9159624f6d96c29106c538a645ae12ba30038b"}
Nov 25 10:28:47 crc kubenswrapper[4932]: I1125 10:28:47.842929 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.842908998 podStartE2EDuration="2.842908998s" podCreationTimestamp="2025-11-25 10:28:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:47.831210773 +0000 UTC m=+5987.957240346" watchObservedRunningTime="2025-11-25 10:28:47.842908998 +0000 UTC m=+5987.968938561"
Nov 25 10:28:48 crc kubenswrapper[4932]: I1125 10:28:48.096263 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 25 10:28:48 crc kubenswrapper[4932]: W1125 10:28:48.104152 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0731f239_1be6_4361_b7b7_aa879f10bb5f.slice/crio-f9d913136109c1d81800b08fd7ca4b270db04a902363d67b3cbd8601134f533f WatchSource:0}: Error finding container f9d913136109c1d81800b08fd7ca4b270db04a902363d67b3cbd8601134f533f: Status 404 returned error can't find the container with id f9d913136109c1d81800b08fd7ca4b270db04a902363d67b3cbd8601134f533f
Nov 25 10:28:48 crc kubenswrapper[4932]: I1125 10:28:48.619132 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1923fb20-c577-41fe-928e-011100637dbe" path="/var/lib/kubelet/pods/1923fb20-c577-41fe-928e-011100637dbe/volumes"
Nov 25 10:28:48 crc kubenswrapper[4932]: I1125 10:28:48.620188 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d" path="/var/lib/kubelet/pods/4b4e8689-c4b5-41d2-a3f6-83ee9eca0e2d/volumes"
Nov 25 10:28:48 crc kubenswrapper[4932]: I1125 10:28:48.838366 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"74038ee8-ffe2-4807-b350-96e607d7d254","Type":"ContainerStarted","Data":"de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d"}
Nov 25 10:28:48 crc kubenswrapper[4932]: I1125 10:28:48.838420 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"74038ee8-ffe2-4807-b350-96e607d7d254","Type":"ContainerStarted","Data":"bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a"}
Nov 25 10:28:48 crc kubenswrapper[4932]: I1125 10:28:48.840025 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0731f239-1be6-4361-b7b7-aa879f10bb5f","Type":"ContainerStarted","Data":"a1ab423c529f4fcc28990ceff16c6c748ea87a34a992bde031d5f8349b65ee31"}
Nov 25 10:28:48 crc kubenswrapper[4932]: I1125 10:28:48.840080 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0731f239-1be6-4361-b7b7-aa879f10bb5f","Type":"ContainerStarted","Data":"7035173b5cfc3ebd9e7be736ac4c0df0a328d03281113facad5ffe7ee6e59498"}
Nov 25 10:28:48 crc kubenswrapper[4932]: I1125 10:28:48.840092 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0731f239-1be6-4361-b7b7-aa879f10bb5f","Type":"ContainerStarted","Data":"f9d913136109c1d81800b08fd7ca4b270db04a902363d67b3cbd8601134f533f"}
Nov 25 10:28:48 crc kubenswrapper[4932]: I1125 10:28:48.865727 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.8656841760000002 podStartE2EDuration="2.865684176s" podCreationTimestamp="2025-11-25 10:28:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:48.856845353 +0000 UTC m=+5988.982874906" watchObservedRunningTime="2025-11-25 10:28:48.865684176 +0000 UTC m=+5988.991713739"
Nov 25 10:28:48 crc kubenswrapper[4932]: I1125 10:28:48.881489 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=1.881467378 podStartE2EDuration="1.881467378s" podCreationTimestamp="2025-11-25 10:28:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:28:48.879832862 +0000 UTC m=+5989.005862445" watchObservedRunningTime="2025-11-25 10:28:48.881467378 +0000 UTC m=+5989.007496941"
Nov 25 10:28:51 crc kubenswrapper[4932]: I1125 10:28:51.154148 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 25 10:28:52 crc kubenswrapper[4932]: I1125 10:28:52.605709 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2"
Nov 25 10:28:52 crc kubenswrapper[4932]: E1125 10:28:52.606283 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:28:52 crc kubenswrapper[4932]: I1125 10:28:52.635945 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 10:28:52 crc kubenswrapper[4932]: I1125 10:28:52.636287 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 25 10:28:56 crc kubenswrapper[4932]: I1125 10:28:56.155012 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 25 10:28:56 crc kubenswrapper[4932]: I1125 10:28:56.182362 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 25 10:28:56 crc kubenswrapper[4932]: I1125 10:28:56.942459 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 25 10:28:57 crc kubenswrapper[4932]: I1125 10:28:57.197446 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 10:28:57 crc kubenswrapper[4932]: I1125 10:28:57.197488 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 25 10:28:57 crc kubenswrapper[4932]: I1125 10:28:57.636693 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 10:28:57 crc kubenswrapper[4932]: I1125 10:28:57.636783 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 25 10:28:58 crc kubenswrapper[4932]: I1125 10:28:58.281450 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="74038ee8-ffe2-4807-b350-96e607d7d254" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.98:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 10:28:58 crc kubenswrapper[4932]: I1125 10:28:58.281695 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="74038ee8-ffe2-4807-b350-96e607d7d254" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.98:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 10:28:58 crc kubenswrapper[4932]: I1125 10:28:58.647449 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="0731f239-1be6-4361-b7b7-aa879f10bb5f" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.99:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 10:28:58 crc kubenswrapper[4932]: I1125 10:28:58.647446 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="0731f239-1be6-4361-b7b7-aa879f10bb5f" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.99:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 25 10:29:04 crc kubenswrapper[4932]: I1125 10:29:04.607049 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2"
Nov 25 10:29:04 crc kubenswrapper[4932]: E1125 10:29:04.607824 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:29:07 crc kubenswrapper[4932]: I1125 10:29:07.202618 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 25 10:29:07 crc kubenswrapper[4932]: I1125 10:29:07.203421 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 25 10:29:07 crc kubenswrapper[4932]: I1125 10:29:07.204270 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 25 10:29:07 crc kubenswrapper[4932]: I1125 10:29:07.208231 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 25 10:29:07 crc kubenswrapper[4932]: I1125 10:29:07.642468 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 25 10:29:07 crc kubenswrapper[4932]: I1125 10:29:07.643484 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 25 10:29:07 crc kubenswrapper[4932]: I1125 10:29:07.651709 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.009070 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.012900 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.013423 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.284182 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54d659b679-kgwkm"]
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.286208 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.288670 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-ovsdbserver-nb\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.288739 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9zkn\" (UniqueName: \"kubernetes.io/projected/382b04e3-773c-4858-b325-752d56d78660-kube-api-access-q9zkn\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.288804 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-ovsdbserver-sb\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.288940 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-dns-svc\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.289118 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-config\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.316261 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54d659b679-kgwkm"]
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.391115 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-ovsdbserver-nb\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.391168 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9zkn\" (UniqueName: \"kubernetes.io/projected/382b04e3-773c-4858-b325-752d56d78660-kube-api-access-q9zkn\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.391269 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-ovsdbserver-sb\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.391302 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-dns-svc\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.391367 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-config\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.392643 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-config\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.392644 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-ovsdbserver-sb\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.392920 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-ovsdbserver-nb\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.393783 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-dns-svc\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.411231 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9zkn\" (UniqueName: \"kubernetes.io/projected/382b04e3-773c-4858-b325-752d56d78660-kube-api-access-q9zkn\") pod \"dnsmasq-dns-54d659b679-kgwkm\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:08 crc kubenswrapper[4932]: I1125 10:29:08.611471 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:09 crc kubenswrapper[4932]: I1125 10:29:09.149816 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54d659b679-kgwkm"]
Nov 25 10:29:10 crc kubenswrapper[4932]: I1125 10:29:10.026566 4932 generic.go:334] "Generic (PLEG): container finished" podID="382b04e3-773c-4858-b325-752d56d78660" containerID="885ae74df93c3614d983aff5ac3985c589ec024d4ad30c81f40b900caf74183f" exitCode=0
Nov 25 10:29:10 crc kubenswrapper[4932]: I1125 10:29:10.027447 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54d659b679-kgwkm" event={"ID":"382b04e3-773c-4858-b325-752d56d78660","Type":"ContainerDied","Data":"885ae74df93c3614d983aff5ac3985c589ec024d4ad30c81f40b900caf74183f"}
Nov 25 10:29:10 crc kubenswrapper[4932]: I1125 10:29:10.028119 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54d659b679-kgwkm" event={"ID":"382b04e3-773c-4858-b325-752d56d78660","Type":"ContainerStarted","Data":"521ddab3d9c93ae3430ed0941019a182c00236a9859bbb3feb3c2e72043eabb1"}
Nov 25 10:29:11 crc kubenswrapper[4932]: I1125 10:29:11.040174 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54d659b679-kgwkm" event={"ID":"382b04e3-773c-4858-b325-752d56d78660","Type":"ContainerStarted","Data":"393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3"}
Nov 25 10:29:11 crc kubenswrapper[4932]: I1125 10:29:11.040757 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:11 crc kubenswrapper[4932]: I1125 10:29:11.060730 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54d659b679-kgwkm" podStartSLOduration=3.060712375 podStartE2EDuration="3.060712375s" podCreationTimestamp="2025-11-25 10:29:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:29:11.057091581 +0000 UTC m=+6011.183121154" watchObservedRunningTime="2025-11-25 10:29:11.060712375 +0000 UTC m=+6011.186741938"
Nov 25 10:29:11 crc kubenswrapper[4932]: I1125 10:29:11.301176 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:29:11 crc kubenswrapper[4932]: I1125 10:29:11.301421 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="74038ee8-ffe2-4807-b350-96e607d7d254" containerName="nova-api-log" containerID="cri-o://bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a" gracePeriod=30
Nov 25 10:29:11 crc kubenswrapper[4932]: I1125 10:29:11.301565 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="74038ee8-ffe2-4807-b350-96e607d7d254" containerName="nova-api-api" containerID="cri-o://de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d" gracePeriod=30
Nov 25 10:29:12 crc kubenswrapper[4932]: I1125 10:29:12.051515 4932 generic.go:334] "Generic (PLEG): container finished" podID="74038ee8-ffe2-4807-b350-96e607d7d254" containerID="bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a" exitCode=143
Nov 25 10:29:12 crc kubenswrapper[4932]: I1125 10:29:12.051593 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"74038ee8-ffe2-4807-b350-96e607d7d254","Type":"ContainerDied","Data":"bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a"}
Nov 25 10:29:14 crc kubenswrapper[4932]: I1125 10:29:14.889609 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.028109 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74038ee8-ffe2-4807-b350-96e607d7d254-config-data\") pod \"74038ee8-ffe2-4807-b350-96e607d7d254\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") "
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.028344 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74038ee8-ffe2-4807-b350-96e607d7d254-combined-ca-bundle\") pod \"74038ee8-ffe2-4807-b350-96e607d7d254\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") "
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.028382 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sh677\" (UniqueName: \"kubernetes.io/projected/74038ee8-ffe2-4807-b350-96e607d7d254-kube-api-access-sh677\") pod \"74038ee8-ffe2-4807-b350-96e607d7d254\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") "
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.028413 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74038ee8-ffe2-4807-b350-96e607d7d254-logs\") pod \"74038ee8-ffe2-4807-b350-96e607d7d254\" (UID: \"74038ee8-ffe2-4807-b350-96e607d7d254\") "
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.029521 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74038ee8-ffe2-4807-b350-96e607d7d254-logs" (OuterVolumeSpecName: "logs") pod "74038ee8-ffe2-4807-b350-96e607d7d254" (UID: "74038ee8-ffe2-4807-b350-96e607d7d254"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.036458 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74038ee8-ffe2-4807-b350-96e607d7d254-kube-api-access-sh677" (OuterVolumeSpecName: "kube-api-access-sh677") pod "74038ee8-ffe2-4807-b350-96e607d7d254" (UID: "74038ee8-ffe2-4807-b350-96e607d7d254"). InnerVolumeSpecName "kube-api-access-sh677". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.064871 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74038ee8-ffe2-4807-b350-96e607d7d254-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "74038ee8-ffe2-4807-b350-96e607d7d254" (UID: "74038ee8-ffe2-4807-b350-96e607d7d254"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.071370 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74038ee8-ffe2-4807-b350-96e607d7d254-config-data" (OuterVolumeSpecName: "config-data") pod "74038ee8-ffe2-4807-b350-96e607d7d254" (UID: "74038ee8-ffe2-4807-b350-96e607d7d254"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.086604 4932 generic.go:334] "Generic (PLEG): container finished" podID="74038ee8-ffe2-4807-b350-96e607d7d254" containerID="de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d" exitCode=0
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.086656 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"74038ee8-ffe2-4807-b350-96e607d7d254","Type":"ContainerDied","Data":"de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d"}
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.086667 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.086683 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"74038ee8-ffe2-4807-b350-96e607d7d254","Type":"ContainerDied","Data":"999243399b284720da044a1b5c9159624f6d96c29106c538a645ae12ba30038b"}
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.086703 4932 scope.go:117] "RemoveContainer" containerID="de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.153311 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74038ee8-ffe2-4807-b350-96e607d7d254-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.153656 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sh677\" (UniqueName: \"kubernetes.io/projected/74038ee8-ffe2-4807-b350-96e607d7d254-kube-api-access-sh677\") on node \"crc\" DevicePath \"\""
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.153672 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/74038ee8-ffe2-4807-b350-96e607d7d254-logs\") on node \"crc\" DevicePath \"\""
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.153694 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74038ee8-ffe2-4807-b350-96e607d7d254-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.160245 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.164273 4932 scope.go:117] "RemoveContainer" containerID="bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.191264 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.192240 4932 scope.go:117] "RemoveContainer" containerID="de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d"
Nov 25 10:29:15 crc kubenswrapper[4932]: E1125 10:29:15.194640 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d\": container with ID starting with de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d not found: ID does not exist" containerID="de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.194686 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d"} err="failed to get container status \"de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d\": rpc error: code = NotFound desc = could not find container \"de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d\": container with ID starting with de839b97c8ab59000c49e21e91d7f1d9dd810d95cbe011b469c984aef606026d not found: ID does not exist"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.194712 4932 scope.go:117] "RemoveContainer" containerID="bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a"
Nov 25 10:29:15 crc kubenswrapper[4932]: E1125 10:29:15.195157 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a\": container with ID starting with bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a not found: ID does not exist" containerID="bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.195257 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a"} err="failed to get container status \"bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a\": rpc error: code = NotFound desc = could not find container \"bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a\": container with ID starting with bf8611fe0e98202f148028cb2c1f2cd9014d0340406eb87fe28e3cac421cd66a not found: ID does not exist"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.201312 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:29:15 crc kubenswrapper[4932]: E1125 10:29:15.201697 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74038ee8-ffe2-4807-b350-96e607d7d254" containerName="nova-api-api"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.201709 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="74038ee8-ffe2-4807-b350-96e607d7d254" containerName="nova-api-api"
Nov 25 10:29:15 crc kubenswrapper[4932]: E1125 10:29:15.201729 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74038ee8-ffe2-4807-b350-96e607d7d254" containerName="nova-api-log"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.201734 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="74038ee8-ffe2-4807-b350-96e607d7d254" containerName="nova-api-log"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.201909 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="74038ee8-ffe2-4807-b350-96e607d7d254" containerName="nova-api-log"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.201931 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="74038ee8-ffe2-4807-b350-96e607d7d254" containerName="nova-api-api"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.202859 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.210830 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.211487 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.211605 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.212369 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.255650 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/719121ae-f1fb-4d40-ad60-62d61fccf6af-logs\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.255702 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5n8wm\" (UniqueName: \"kubernetes.io/projected/719121ae-f1fb-4d40-ad60-62d61fccf6af-kube-api-access-5n8wm\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.255866 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/719121ae-f1fb-4d40-ad60-62d61fccf6af-public-tls-certs\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.255914 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/719121ae-f1fb-4d40-ad60-62d61fccf6af-internal-tls-certs\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.255939 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/719121ae-f1fb-4d40-ad60-62d61fccf6af-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.255955 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/719121ae-f1fb-4d40-ad60-62d61fccf6af-config-data\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.357972 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/719121ae-f1fb-4d40-ad60-62d61fccf6af-public-tls-certs\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.358093 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/719121ae-f1fb-4d40-ad60-62d61fccf6af-internal-tls-certs\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.358135 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/719121ae-f1fb-4d40-ad60-62d61fccf6af-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.358158 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/719121ae-f1fb-4d40-ad60-62d61fccf6af-config-data\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.358268 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/719121ae-f1fb-4d40-ad60-62d61fccf6af-logs\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.358300 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5n8wm\" (UniqueName: \"kubernetes.io/projected/719121ae-f1fb-4d40-ad60-62d61fccf6af-kube-api-access-5n8wm\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.358984 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/719121ae-f1fb-4d40-ad60-62d61fccf6af-logs\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.362296 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/719121ae-f1fb-4d40-ad60-62d61fccf6af-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.362391 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/719121ae-f1fb-4d40-ad60-62d61fccf6af-config-data\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.362584 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/719121ae-f1fb-4d40-ad60-62d61fccf6af-internal-tls-certs\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.365915 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/719121ae-f1fb-4d40-ad60-62d61fccf6af-public-tls-certs\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.375214 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5n8wm\" (UniqueName: \"kubernetes.io/projected/719121ae-f1fb-4d40-ad60-62d61fccf6af-kube-api-access-5n8wm\") pod \"nova-api-0\" (UID: \"719121ae-f1fb-4d40-ad60-62d61fccf6af\") " pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.528164 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.607016 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2"
Nov 25 10:29:15 crc kubenswrapper[4932]: I1125 10:29:15.990125 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 25 10:29:15 crc kubenswrapper[4932]: W1125 10:29:15.991464 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod719121ae_f1fb_4d40_ad60_62d61fccf6af.slice/crio-532fe836eea0b1dd52ae3e85f97b10c68705d74a2cbb31079a3bc61278151118 WatchSource:0}: Error finding container 532fe836eea0b1dd52ae3e85f97b10c68705d74a2cbb31079a3bc61278151118: Status 404 returned error can't find the container with id 532fe836eea0b1dd52ae3e85f97b10c68705d74a2cbb31079a3bc61278151118
Nov 25 10:29:16 crc kubenswrapper[4932]: I1125 10:29:16.111278 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"d3ec26a09840ae0db21e9656db7082103181dfd412ba17c268a25c18af62eb7f"}
Nov 25 10:29:16 crc kubenswrapper[4932]: I1125 10:29:16.120620 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"719121ae-f1fb-4d40-ad60-62d61fccf6af","Type":"ContainerStarted","Data":"532fe836eea0b1dd52ae3e85f97b10c68705d74a2cbb31079a3bc61278151118"}
Nov 25 10:29:16 crc kubenswrapper[4932]: I1125 10:29:16.618331 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74038ee8-ffe2-4807-b350-96e607d7d254" path="/var/lib/kubelet/pods/74038ee8-ffe2-4807-b350-96e607d7d254/volumes"
Nov 25 10:29:17 crc kubenswrapper[4932]: I1125 10:29:17.130370 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"719121ae-f1fb-4d40-ad60-62d61fccf6af","Type":"ContainerStarted","Data":"d4700c03db657f62b2018692b569f0a193faec484ca4e6ddf74ffbe2e531a4d9"}
Nov 25 10:29:17 crc kubenswrapper[4932]: I1125 10:29:17.130725 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"719121ae-f1fb-4d40-ad60-62d61fccf6af","Type":"ContainerStarted","Data":"e1af70f3ce9cc2223d3a8161122845ceb6d036ac43fda098ba79dfcc0f71b3d4"}
Nov 25 10:29:17 crc kubenswrapper[4932]: I1125 10:29:17.149101 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.149078499 podStartE2EDuration="2.149078499s" podCreationTimestamp="2025-11-25 10:29:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:29:17.146401022 +0000 UTC m=+6017.272430605" watchObservedRunningTime="2025-11-25 10:29:17.149078499 +0000 UTC m=+6017.275108062"
Nov 25 10:29:18 crc kubenswrapper[4932]: I1125 10:29:18.616275 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-54d659b679-kgwkm"
Nov 25 10:29:18 crc kubenswrapper[4932]: I1125 10:29:18.682260 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-774d5f4bd7-zrqhp"]
Nov 25 10:29:18 crc kubenswrapper[4932]: I1125 10:29:18.682748 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" podUID="519b4b37-e54b-400e-b6fb-2ecafb8b59fe" containerName="dnsmasq-dns" containerID="cri-o://3023e7835d395628606ba55336c1a0d6272dbe01c002094e67b5bc6ceb74ada5" gracePeriod=10
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.149185 4932 generic.go:334] "Generic (PLEG): container finished" podID="519b4b37-e54b-400e-b6fb-2ecafb8b59fe" containerID="3023e7835d395628606ba55336c1a0d6272dbe01c002094e67b5bc6ceb74ada5" exitCode=0
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.149294 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" event={"ID":"519b4b37-e54b-400e-b6fb-2ecafb8b59fe","Type":"ContainerDied","Data":"3023e7835d395628606ba55336c1a0d6272dbe01c002094e67b5bc6ceb74ada5"}
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.149551 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" event={"ID":"519b4b37-e54b-400e-b6fb-2ecafb8b59fe","Type":"ContainerDied","Data":"c0fe358ffd28384489c82702febb846378ea50e4db35e47f65757f7b8028b0bc"}
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.149565 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0fe358ffd28384489c82702febb846378ea50e4db35e47f65757f7b8028b0bc"
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.177205 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp"
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.255887 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-ovsdbserver-nb\") pod \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") "
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.255943 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-dns-svc\") pod \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") "
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.255981 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-ovsdbserver-sb\") pod \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") "
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.256092 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pgd72\" (UniqueName: \"kubernetes.io/projected/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-kube-api-access-pgd72\") pod \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") "
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.256276 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-config\") pod \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\" (UID: \"519b4b37-e54b-400e-b6fb-2ecafb8b59fe\") "
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.261778 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-kube-api-access-pgd72" (OuterVolumeSpecName: "kube-api-access-pgd72") pod "519b4b37-e54b-400e-b6fb-2ecafb8b59fe" (UID: "519b4b37-e54b-400e-b6fb-2ecafb8b59fe"). InnerVolumeSpecName "kube-api-access-pgd72". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.306328 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "519b4b37-e54b-400e-b6fb-2ecafb8b59fe" (UID: "519b4b37-e54b-400e-b6fb-2ecafb8b59fe"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.307022 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "519b4b37-e54b-400e-b6fb-2ecafb8b59fe" (UID: "519b4b37-e54b-400e-b6fb-2ecafb8b59fe"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.308653 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "519b4b37-e54b-400e-b6fb-2ecafb8b59fe" (UID: "519b4b37-e54b-400e-b6fb-2ecafb8b59fe"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.318926 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-config" (OuterVolumeSpecName: "config") pod "519b4b37-e54b-400e-b6fb-2ecafb8b59fe" (UID: "519b4b37-e54b-400e-b6fb-2ecafb8b59fe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.358150 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.358219 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.358231 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.358246 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pgd72\" (UniqueName: \"kubernetes.io/projected/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-kube-api-access-pgd72\") on node \"crc\" DevicePath \"\""
Nov 25 10:29:19 crc kubenswrapper[4932]: I1125 10:29:19.358267 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/519b4b37-e54b-400e-b6fb-2ecafb8b59fe-config\") on node \"crc\" DevicePath \"\""
Nov 25 10:29:20 crc kubenswrapper[4932]: I1125 10:29:20.158758 4932 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/dnsmasq-dns-774d5f4bd7-zrqhp" Nov 25 10:29:20 crc kubenswrapper[4932]: I1125 10:29:20.195064 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-774d5f4bd7-zrqhp"] Nov 25 10:29:20 crc kubenswrapper[4932]: I1125 10:29:20.202983 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-774d5f4bd7-zrqhp"] Nov 25 10:29:20 crc kubenswrapper[4932]: I1125 10:29:20.621074 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="519b4b37-e54b-400e-b6fb-2ecafb8b59fe" path="/var/lib/kubelet/pods/519b4b37-e54b-400e-b6fb-2ecafb8b59fe/volumes" Nov 25 10:29:21 crc kubenswrapper[4932]: I1125 10:29:21.889097 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-7z8vf"] Nov 25 10:29:21 crc kubenswrapper[4932]: E1125 10:29:21.890875 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="519b4b37-e54b-400e-b6fb-2ecafb8b59fe" containerName="init" Nov 25 10:29:21 crc kubenswrapper[4932]: I1125 10:29:21.890898 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="519b4b37-e54b-400e-b6fb-2ecafb8b59fe" containerName="init" Nov 25 10:29:21 crc kubenswrapper[4932]: E1125 10:29:21.890925 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="519b4b37-e54b-400e-b6fb-2ecafb8b59fe" containerName="dnsmasq-dns" Nov 25 10:29:21 crc kubenswrapper[4932]: I1125 10:29:21.890931 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="519b4b37-e54b-400e-b6fb-2ecafb8b59fe" containerName="dnsmasq-dns" Nov 25 10:29:21 crc kubenswrapper[4932]: I1125 10:29:21.893713 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="519b4b37-e54b-400e-b6fb-2ecafb8b59fe" containerName="dnsmasq-dns" Nov 25 10:29:21 crc kubenswrapper[4932]: I1125 10:29:21.894520 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:21 crc kubenswrapper[4932]: I1125 10:29:21.900407 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 25 10:29:21 crc kubenswrapper[4932]: I1125 10:29:21.900410 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 25 10:29:21 crc kubenswrapper[4932]: I1125 10:29:21.914043 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-7z8vf"] Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.007630 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d7459818-b9b9-4dab-b6e1-30a76004c1b3-ring-data-devices\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.007684 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-swiftconf\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.007750 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-dispersionconf\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.007782 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7459818-b9b9-4dab-b6e1-30a76004c1b3-scripts\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.007842 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d7459818-b9b9-4dab-b6e1-30a76004c1b3-etc-swift\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.007891 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-combined-ca-bundle\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.007957 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sk5b7\" (UniqueName: \"kubernetes.io/projected/d7459818-b9b9-4dab-b6e1-30a76004c1b3-kube-api-access-sk5b7\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.109599 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: 
\"kubernetes.io/empty-dir/d7459818-b9b9-4dab-b6e1-30a76004c1b3-etc-swift\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.109693 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-combined-ca-bundle\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.109753 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sk5b7\" (UniqueName: \"kubernetes.io/projected/d7459818-b9b9-4dab-b6e1-30a76004c1b3-kube-api-access-sk5b7\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.109799 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d7459818-b9b9-4dab-b6e1-30a76004c1b3-ring-data-devices\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.109819 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-swiftconf\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.109867 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-dispersionconf\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.109888 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7459818-b9b9-4dab-b6e1-30a76004c1b3-scripts\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.110729 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d7459818-b9b9-4dab-b6e1-30a76004c1b3-ring-data-devices\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.111016 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d7459818-b9b9-4dab-b6e1-30a76004c1b3-etc-swift\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.112513 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7459818-b9b9-4dab-b6e1-30a76004c1b3-scripts\") pod \"swift-ring-rebalance-7z8vf\" (UID: 
\"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.116631 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-swiftconf\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.116916 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-dispersionconf\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.117246 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-combined-ca-bundle\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.134783 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sk5b7\" (UniqueName: \"kubernetes.io/projected/d7459818-b9b9-4dab-b6e1-30a76004c1b3-kube-api-access-sk5b7\") pod \"swift-ring-rebalance-7z8vf\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.217021 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:22 crc kubenswrapper[4932]: I1125 10:29:22.691257 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-7z8vf"] Nov 25 10:29:22 crc kubenswrapper[4932]: W1125 10:29:22.697253 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7459818_b9b9_4dab_b6e1_30a76004c1b3.slice/crio-4acda777abc4b9cf2f47c655435ea86294f4165d8bb79b421df94003209c0ffb WatchSource:0}: Error finding container 4acda777abc4b9cf2f47c655435ea86294f4165d8bb79b421df94003209c0ffb: Status 404 returned error can't find the container with id 4acda777abc4b9cf2f47c655435ea86294f4165d8bb79b421df94003209c0ffb Nov 25 10:29:23 crc kubenswrapper[4932]: I1125 10:29:23.187712 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7z8vf" event={"ID":"d7459818-b9b9-4dab-b6e1-30a76004c1b3","Type":"ContainerStarted","Data":"d36361e3ca257c3de12bb5fda3a95894f441da8e98590fe6f98d88266fad5d32"} Nov 25 10:29:23 crc kubenswrapper[4932]: I1125 10:29:23.188060 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7z8vf" event={"ID":"d7459818-b9b9-4dab-b6e1-30a76004c1b3","Type":"ContainerStarted","Data":"4acda777abc4b9cf2f47c655435ea86294f4165d8bb79b421df94003209c0ffb"} Nov 25 10:29:23 crc kubenswrapper[4932]: I1125 10:29:23.211790 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-7z8vf" podStartSLOduration=2.211770445 podStartE2EDuration="2.211770445s" podCreationTimestamp="2025-11-25 10:29:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 
10:29:23.201495711 +0000 UTC m=+6023.327525294" watchObservedRunningTime="2025-11-25 10:29:23.211770445 +0000 UTC m=+6023.337799998" Nov 25 10:29:25 crc kubenswrapper[4932]: I1125 10:29:25.528462 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 10:29:25 crc kubenswrapper[4932]: I1125 10:29:25.529079 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 10:29:26 crc kubenswrapper[4932]: I1125 10:29:26.544432 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="719121ae-f1fb-4d40-ad60-62d61fccf6af" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.101:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 10:29:26 crc kubenswrapper[4932]: I1125 10:29:26.544658 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="719121ae-f1fb-4d40-ad60-62d61fccf6af" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.101:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 10:29:27 crc kubenswrapper[4932]: I1125 10:29:27.236565 4932 generic.go:334] "Generic (PLEG): container finished" podID="d7459818-b9b9-4dab-b6e1-30a76004c1b3" containerID="d36361e3ca257c3de12bb5fda3a95894f441da8e98590fe6f98d88266fad5d32" exitCode=0 Nov 25 10:29:27 crc kubenswrapper[4932]: I1125 10:29:27.236738 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7z8vf" event={"ID":"d7459818-b9b9-4dab-b6e1-30a76004c1b3","Type":"ContainerDied","Data":"d36361e3ca257c3de12bb5fda3a95894f441da8e98590fe6f98d88266fad5d32"} Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.575905 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.656106 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sk5b7\" (UniqueName: \"kubernetes.io/projected/d7459818-b9b9-4dab-b6e1-30a76004c1b3-kube-api-access-sk5b7\") pod \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.656315 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d7459818-b9b9-4dab-b6e1-30a76004c1b3-ring-data-devices\") pod \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.656467 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-dispersionconf\") pod \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.656636 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7459818-b9b9-4dab-b6e1-30a76004c1b3-scripts\") pod \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.656665 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-combined-ca-bundle\") pod \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.656725 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d7459818-b9b9-4dab-b6e1-30a76004c1b3-etc-swift\") pod \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.656890 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-swiftconf\") pod \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\" (UID: \"d7459818-b9b9-4dab-b6e1-30a76004c1b3\") " Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.657797 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7459818-b9b9-4dab-b6e1-30a76004c1b3-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "d7459818-b9b9-4dab-b6e1-30a76004c1b3" (UID: "d7459818-b9b9-4dab-b6e1-30a76004c1b3"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.658740 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7459818-b9b9-4dab-b6e1-30a76004c1b3-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "d7459818-b9b9-4dab-b6e1-30a76004c1b3" (UID: "d7459818-b9b9-4dab-b6e1-30a76004c1b3"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.660851 4932 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d7459818-b9b9-4dab-b6e1-30a76004c1b3-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.660903 4932 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d7459818-b9b9-4dab-b6e1-30a76004c1b3-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.664556 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7459818-b9b9-4dab-b6e1-30a76004c1b3-kube-api-access-sk5b7" (OuterVolumeSpecName: "kube-api-access-sk5b7") pod "d7459818-b9b9-4dab-b6e1-30a76004c1b3" (UID: "d7459818-b9b9-4dab-b6e1-30a76004c1b3"). InnerVolumeSpecName "kube-api-access-sk5b7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.686303 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "d7459818-b9b9-4dab-b6e1-30a76004c1b3" (UID: "d7459818-b9b9-4dab-b6e1-30a76004c1b3"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.686416 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "d7459818-b9b9-4dab-b6e1-30a76004c1b3" (UID: "d7459818-b9b9-4dab-b6e1-30a76004c1b3"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.691827 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7459818-b9b9-4dab-b6e1-30a76004c1b3-scripts" (OuterVolumeSpecName: "scripts") pod "d7459818-b9b9-4dab-b6e1-30a76004c1b3" (UID: "d7459818-b9b9-4dab-b6e1-30a76004c1b3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.695263 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d7459818-b9b9-4dab-b6e1-30a76004c1b3" (UID: "d7459818-b9b9-4dab-b6e1-30a76004c1b3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.762949 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sk5b7\" (UniqueName: \"kubernetes.io/projected/d7459818-b9b9-4dab-b6e1-30a76004c1b3-kube-api-access-sk5b7\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.763002 4932 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.763016 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7459818-b9b9-4dab-b6e1-30a76004c1b3-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.763027 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:28 crc kubenswrapper[4932]: I1125 10:29:28.763041 4932 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d7459818-b9b9-4dab-b6e1-30a76004c1b3-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:29 crc kubenswrapper[4932]: I1125 10:29:29.260013 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7z8vf" event={"ID":"d7459818-b9b9-4dab-b6e1-30a76004c1b3","Type":"ContainerDied","Data":"4acda777abc4b9cf2f47c655435ea86294f4165d8bb79b421df94003209c0ffb"} Nov 25 10:29:29 crc kubenswrapper[4932]: I1125 10:29:29.260118 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4acda777abc4b9cf2f47c655435ea86294f4165d8bb79b421df94003209c0ffb" Nov 25 10:29:29 crc kubenswrapper[4932]: I1125 10:29:29.261401 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-7z8vf" Nov 25 10:29:35 crc kubenswrapper[4932]: I1125 10:29:35.535486 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 10:29:35 crc kubenswrapper[4932]: I1125 10:29:35.536078 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 10:29:35 crc kubenswrapper[4932]: I1125 10:29:35.536714 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 10:29:35 crc kubenswrapper[4932]: I1125 10:29:35.536978 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 10:29:35 crc kubenswrapper[4932]: I1125 10:29:35.542957 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 10:29:35 crc kubenswrapper[4932]: I1125 10:29:35.545143 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 10:29:37 crc kubenswrapper[4932]: I1125 10:29:37.838922 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tlnw7"] Nov 25 10:29:37 crc kubenswrapper[4932]: E1125 10:29:37.839696 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7459818-b9b9-4dab-b6e1-30a76004c1b3" containerName="swift-ring-rebalance" Nov 25 10:29:37 crc kubenswrapper[4932]: I1125 10:29:37.839710 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7459818-b9b9-4dab-b6e1-30a76004c1b3" containerName="swift-ring-rebalance" Nov 25 10:29:37 crc kubenswrapper[4932]: I1125 10:29:37.839913 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7459818-b9b9-4dab-b6e1-30a76004c1b3" containerName="swift-ring-rebalance" Nov 25 10:29:37 crc kubenswrapper[4932]: I1125 10:29:37.845065 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:37 crc kubenswrapper[4932]: I1125 10:29:37.874762 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tlnw7"] Nov 25 10:29:37 crc kubenswrapper[4932]: I1125 10:29:37.965296 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7whz9\" (UniqueName: \"kubernetes.io/projected/3b0c5d97-8457-413a-902a-e28fb5366d1b-kube-api-access-7whz9\") pod \"redhat-operators-tlnw7\" (UID: \"3b0c5d97-8457-413a-902a-e28fb5366d1b\") " pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:37 crc kubenswrapper[4932]: I1125 10:29:37.965360 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b0c5d97-8457-413a-902a-e28fb5366d1b-utilities\") pod \"redhat-operators-tlnw7\" (UID: \"3b0c5d97-8457-413a-902a-e28fb5366d1b\") " pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:37 crc kubenswrapper[4932]: I1125 10:29:37.965540 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b0c5d97-8457-413a-902a-e28fb5366d1b-catalog-content\") pod \"redhat-operators-tlnw7\" (UID: \"3b0c5d97-8457-413a-902a-e28fb5366d1b\") " pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:38 crc kubenswrapper[4932]: I1125 10:29:38.066852 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7whz9\" (UniqueName: \"kubernetes.io/projected/3b0c5d97-8457-413a-902a-e28fb5366d1b-kube-api-access-7whz9\") pod \"redhat-operators-tlnw7\" (UID: \"3b0c5d97-8457-413a-902a-e28fb5366d1b\") " pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:38 crc kubenswrapper[4932]: I1125 10:29:38.067265 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b0c5d97-8457-413a-902a-e28fb5366d1b-utilities\") pod \"redhat-operators-tlnw7\" (UID: \"3b0c5d97-8457-413a-902a-e28fb5366d1b\") " pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:38 crc kubenswrapper[4932]: I1125 10:29:38.067421 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b0c5d97-8457-413a-902a-e28fb5366d1b-catalog-content\") pod \"redhat-operators-tlnw7\" (UID: \"3b0c5d97-8457-413a-902a-e28fb5366d1b\") " pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:38 crc kubenswrapper[4932]: I1125 10:29:38.067751 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b0c5d97-8457-413a-902a-e28fb5366d1b-utilities\") pod \"redhat-operators-tlnw7\" (UID: \"3b0c5d97-8457-413a-902a-e28fb5366d1b\") " pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:38 crc kubenswrapper[4932]: I1125 10:29:38.067783 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b0c5d97-8457-413a-902a-e28fb5366d1b-catalog-content\") pod \"redhat-operators-tlnw7\" (UID: \"3b0c5d97-8457-413a-902a-e28fb5366d1b\") " pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:38 crc kubenswrapper[4932]: I1125 10:29:38.088298 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-7whz9\" (UniqueName: \"kubernetes.io/projected/3b0c5d97-8457-413a-902a-e28fb5366d1b-kube-api-access-7whz9\") pod \"redhat-operators-tlnw7\" (UID: \"3b0c5d97-8457-413a-902a-e28fb5366d1b\") " pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:38 crc kubenswrapper[4932]: I1125 10:29:38.177862 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:38 crc kubenswrapper[4932]: I1125 10:29:38.641234 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tlnw7"] Nov 25 10:29:39 crc kubenswrapper[4932]: I1125 10:29:39.351366 4932 generic.go:334] "Generic (PLEG): container finished" podID="3b0c5d97-8457-413a-902a-e28fb5366d1b" containerID="b9c1b95c19746e975643ea33ed480ecf0e21540f8fecbe789979fdabd17d9029" exitCode=0 Nov 25 10:29:39 crc kubenswrapper[4932]: I1125 10:29:39.351421 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlnw7" event={"ID":"3b0c5d97-8457-413a-902a-e28fb5366d1b","Type":"ContainerDied","Data":"b9c1b95c19746e975643ea33ed480ecf0e21540f8fecbe789979fdabd17d9029"} Nov 25 10:29:39 crc kubenswrapper[4932]: I1125 10:29:39.351666 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlnw7" event={"ID":"3b0c5d97-8457-413a-902a-e28fb5366d1b","Type":"ContainerStarted","Data":"f6a99fa1af04feff2aa77321c4fbb9982d7c57bd87b558901da96037f0a474d2"} Nov 25 10:29:39 crc kubenswrapper[4932]: I1125 10:29:39.354930 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:29:42 crc kubenswrapper[4932]: I1125 10:29:42.051145 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-n7pqr"] Nov 25 10:29:42 crc kubenswrapper[4932]: I1125 10:29:42.068619 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-7abc-account-create-f6jkl"] Nov 25 10:29:42 crc kubenswrapper[4932]: I1125 10:29:42.078254 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-7abc-account-create-f6jkl"] Nov 25 10:29:42 crc kubenswrapper[4932]: I1125 10:29:42.086753 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-n7pqr"] Nov 25 10:29:42 crc kubenswrapper[4932]: I1125 10:29:42.616070 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5bc26237-8e21-4e9b-9f77-f3b234013bde" path="/var/lib/kubelet/pods/5bc26237-8e21-4e9b-9f77-f3b234013bde/volumes" Nov 25 10:29:42 crc kubenswrapper[4932]: I1125 10:29:42.616743 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9350488-929d-4db8-8a5e-5d17da1952b8" path="/var/lib/kubelet/pods/c9350488-929d-4db8-8a5e-5d17da1952b8/volumes" Nov 25 10:29:48 crc kubenswrapper[4932]: I1125 10:29:48.433762 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlnw7" event={"ID":"3b0c5d97-8457-413a-902a-e28fb5366d1b","Type":"ContainerStarted","Data":"e5f4de760d47c82c26ab485e0b28fb74d3f221c77488d2e59b4e586e2dc5451d"} Nov 25 10:29:49 crc kubenswrapper[4932]: I1125 10:29:49.030212 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-pg22p"] Nov 25 10:29:49 crc kubenswrapper[4932]: I1125 10:29:49.043082 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-pg22p"] Nov 25 10:29:50 crc kubenswrapper[4932]: I1125 10:29:50.618438 4932 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ef2effb-8965-49db-8aa8-49fd55e6a149" path="/var/lib/kubelet/pods/7ef2effb-8965-49db-8aa8-49fd55e6a149/volumes" Nov 25 10:29:51 crc kubenswrapper[4932]: I1125 10:29:51.469807 4932 generic.go:334] "Generic (PLEG): container finished" podID="3b0c5d97-8457-413a-902a-e28fb5366d1b" containerID="e5f4de760d47c82c26ab485e0b28fb74d3f221c77488d2e59b4e586e2dc5451d" exitCode=0 Nov 25 10:29:51 crc kubenswrapper[4932]: I1125 10:29:51.469900 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlnw7" event={"ID":"3b0c5d97-8457-413a-902a-e28fb5366d1b","Type":"ContainerDied","Data":"e5f4de760d47c82c26ab485e0b28fb74d3f221c77488d2e59b4e586e2dc5451d"} Nov 25 10:29:52 crc kubenswrapper[4932]: I1125 10:29:52.483953 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tlnw7" event={"ID":"3b0c5d97-8457-413a-902a-e28fb5366d1b","Type":"ContainerStarted","Data":"78a262a111d3365f5c358d12b890c88b1260056211fea54c895f38ab010e330a"} Nov 25 10:29:52 crc kubenswrapper[4932]: I1125 10:29:52.512129 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tlnw7" podStartSLOduration=2.844285139 podStartE2EDuration="15.512103816s" podCreationTimestamp="2025-11-25 10:29:37 +0000 UTC" firstStartedPulling="2025-11-25 10:29:39.35464085 +0000 UTC m=+6039.480670413" lastFinishedPulling="2025-11-25 10:29:52.022459527 +0000 UTC m=+6052.148489090" observedRunningTime="2025-11-25 10:29:52.505031304 +0000 UTC m=+6052.631060867" watchObservedRunningTime="2025-11-25 10:29:52.512103816 +0000 UTC m=+6052.638133379" Nov 25 10:29:58 crc kubenswrapper[4932]: I1125 10:29:58.179106 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:58 crc kubenswrapper[4932]: I1125 10:29:58.181632 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:58 crc kubenswrapper[4932]: I1125 10:29:58.277061 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:58 crc kubenswrapper[4932]: I1125 10:29:58.584089 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tlnw7" Nov 25 10:29:58 crc kubenswrapper[4932]: I1125 10:29:58.655526 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tlnw7"] Nov 25 10:29:58 crc kubenswrapper[4932]: I1125 10:29:58.688941 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jp757"] Nov 25 10:29:58 crc kubenswrapper[4932]: I1125 10:29:58.689224 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jp757" podUID="6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" containerName="registry-server" containerID="cri-o://159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9" gracePeriod=2 Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.244713 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jp757" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.402940 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84rql\" (UniqueName: \"kubernetes.io/projected/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-kube-api-access-84rql\") pod \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\" (UID: \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\") " Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.403086 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-catalog-content\") pod \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\" (UID: \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\") " Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.403216 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-utilities\") pod \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\" (UID: \"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d\") " Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.403526 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-utilities" (OuterVolumeSpecName: "utilities") pod "6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" (UID: "6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.403726 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.410532 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-kube-api-access-84rql" (OuterVolumeSpecName: "kube-api-access-84rql") pod "6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" (UID: "6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d"). InnerVolumeSpecName "kube-api-access-84rql". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.491450 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" (UID: "6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.505983 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.506023 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84rql\" (UniqueName: \"kubernetes.io/projected/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d-kube-api-access-84rql\") on node \"crc\" DevicePath \"\"" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.545582 4932 generic.go:334] "Generic (PLEG): container finished" podID="6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" containerID="159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9" exitCode=0 Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.545632 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jp757" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.545651 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jp757" event={"ID":"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d","Type":"ContainerDied","Data":"159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9"} Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.545684 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jp757" event={"ID":"6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d","Type":"ContainerDied","Data":"15103fb9bf3bcdc818d56c5486341bb712d6fe537eb6855091a6c664e24d91f5"} Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.545705 4932 scope.go:117] "RemoveContainer" containerID="159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.574643 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jp757"] Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.582377 4932 scope.go:117] "RemoveContainer" containerID="e52977060390a28fa6e57c6184cd4089a9227b149bdcf5d7d5ad30d60774d6f6" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.587206 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jp757"] Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.607445 4932 scope.go:117] "RemoveContainer" containerID="dc23ab802f32d3709efb2f8b85db6ccf48eb172fc671b8ad6b005c5bd4b5dcee" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.646572 4932 scope.go:117] "RemoveContainer" containerID="159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9" Nov 25 10:29:59 crc kubenswrapper[4932]: E1125 10:29:59.647311 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9\": container with ID starting with 159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9 not found: ID does not exist" containerID="159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.647349 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9"} err="failed to get container status \"159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9\": 
rpc error: code = NotFound desc = could not find container \"159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9\": container with ID starting with 159763b90800ca7d47ab8fba8cbf71446c0605c6c7c01265c12c9d05a74bd4b9 not found: ID does not exist" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.647382 4932 scope.go:117] "RemoveContainer" containerID="e52977060390a28fa6e57c6184cd4089a9227b149bdcf5d7d5ad30d60774d6f6" Nov 25 10:29:59 crc kubenswrapper[4932]: E1125 10:29:59.648470 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e52977060390a28fa6e57c6184cd4089a9227b149bdcf5d7d5ad30d60774d6f6\": container with ID starting with e52977060390a28fa6e57c6184cd4089a9227b149bdcf5d7d5ad30d60774d6f6 not found: ID does not exist" containerID="e52977060390a28fa6e57c6184cd4089a9227b149bdcf5d7d5ad30d60774d6f6" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.648508 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e52977060390a28fa6e57c6184cd4089a9227b149bdcf5d7d5ad30d60774d6f6"} err="failed to get container status \"e52977060390a28fa6e57c6184cd4089a9227b149bdcf5d7d5ad30d60774d6f6\": rpc error: code = NotFound desc = could not find container \"e52977060390a28fa6e57c6184cd4089a9227b149bdcf5d7d5ad30d60774d6f6\": container with ID starting with e52977060390a28fa6e57c6184cd4089a9227b149bdcf5d7d5ad30d60774d6f6 not found: ID does not exist" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.648533 4932 scope.go:117] "RemoveContainer" containerID="dc23ab802f32d3709efb2f8b85db6ccf48eb172fc671b8ad6b005c5bd4b5dcee" Nov 25 10:29:59 crc kubenswrapper[4932]: E1125 10:29:59.648830 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc23ab802f32d3709efb2f8b85db6ccf48eb172fc671b8ad6b005c5bd4b5dcee\": container with ID starting with dc23ab802f32d3709efb2f8b85db6ccf48eb172fc671b8ad6b005c5bd4b5dcee not found: ID does not exist" containerID="dc23ab802f32d3709efb2f8b85db6ccf48eb172fc671b8ad6b005c5bd4b5dcee" Nov 25 10:29:59 crc kubenswrapper[4932]: I1125 10:29:59.648851 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc23ab802f32d3709efb2f8b85db6ccf48eb172fc671b8ad6b005c5bd4b5dcee"} err="failed to get container status \"dc23ab802f32d3709efb2f8b85db6ccf48eb172fc671b8ad6b005c5bd4b5dcee\": rpc error: code = NotFound desc = could not find container \"dc23ab802f32d3709efb2f8b85db6ccf48eb172fc671b8ad6b005c5bd4b5dcee\": container with ID starting with dc23ab802f32d3709efb2f8b85db6ccf48eb172fc671b8ad6b005c5bd4b5dcee not found: ID does not exist" Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.145987 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"] Nov 25 10:30:00 crc kubenswrapper[4932]: E1125 10:30:00.146555 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" containerName="registry-server" Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.146580 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" containerName="registry-server" Nov 25 10:30:00 crc kubenswrapper[4932]: E1125 10:30:00.146603 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" containerName="extract-content" Nov 25 10:30:00 crc 
kubenswrapper[4932]: I1125 10:30:00.146611 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" containerName="extract-content"
Nov 25 10:30:00 crc kubenswrapper[4932]: E1125 10:30:00.146622 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" containerName="extract-utilities"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.146629 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" containerName="extract-utilities"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.146878 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" containerName="registry-server"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.147740 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.150803 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.150937 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.158400 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"]
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.322512 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-secret-volume\") pod \"collect-profiles-29401110-tlvmt\" (UID: \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.322579 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-config-volume\") pod \"collect-profiles-29401110-tlvmt\" (UID: \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.322602 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8cm2\" (UniqueName: \"kubernetes.io/projected/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-kube-api-access-t8cm2\") pod \"collect-profiles-29401110-tlvmt\" (UID: \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.424644 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-secret-volume\") pod \"collect-profiles-29401110-tlvmt\" (UID: \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.424719 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-config-volume\") pod \"collect-profiles-29401110-tlvmt\" (UID: \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.424741 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8cm2\" (UniqueName: \"kubernetes.io/projected/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-kube-api-access-t8cm2\") pod \"collect-profiles-29401110-tlvmt\" (UID: \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.425913 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-config-volume\") pod \"collect-profiles-29401110-tlvmt\" (UID: \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.431247 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-secret-volume\") pod \"collect-profiles-29401110-tlvmt\" (UID: \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.446594 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8cm2\" (UniqueName: \"kubernetes.io/projected/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-kube-api-access-t8cm2\") pod \"collect-profiles-29401110-tlvmt\" (UID: \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.466617 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.641269 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d" path="/var/lib/kubelet/pods/6b9b79ce-bfdb-466e-96b6-5ed330ba1f2d/volumes"
Nov 25 10:30:00 crc kubenswrapper[4932]: I1125 10:30:00.961919 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"]
Nov 25 10:30:00 crc kubenswrapper[4932]: W1125 10:30:00.971529 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13e9c5b3_f6b9_40f1_8ea3_8dc040e6960d.slice/crio-7f1895c762de47c253eb910bf483de0d51310854cf2d88cf635d982ed0ca7ac9 WatchSource:0}: Error finding container 7f1895c762de47c253eb910bf483de0d51310854cf2d88cf635d982ed0ca7ac9: Status 404 returned error can't find the container with id 7f1895c762de47c253eb910bf483de0d51310854cf2d88cf635d982ed0ca7ac9
Nov 25 10:30:01 crc kubenswrapper[4932]: I1125 10:30:01.579651 4932 generic.go:334] "Generic (PLEG): container finished" podID="13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d" containerID="6e20d6f41bf1de05055ee907ed3a799d0946b326a575dd568e170debd53ea853" exitCode=0
Nov 25 10:30:01 crc kubenswrapper[4932]: I1125 10:30:01.579742 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt" event={"ID":"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d","Type":"ContainerDied","Data":"6e20d6f41bf1de05055ee907ed3a799d0946b326a575dd568e170debd53ea853"}
Nov 25 10:30:01 crc kubenswrapper[4932]: I1125 10:30:01.579969 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt" event={"ID":"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d","Type":"ContainerStarted","Data":"7f1895c762de47c253eb910bf483de0d51310854cf2d88cf635d982ed0ca7ac9"}
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.037346 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-pgksb"]
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.047552 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-pgksb"]
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.341673 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-8kh4g"]
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.343095 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.348583 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.348946 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-s7mw2"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.349028 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.363870 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-xbhld"]
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.366027 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.388868 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8kh4g"]
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.403342 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-xbhld"]
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.465515 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c2066bcc-99df-41ac-9fb9-15320db7e342-etc-ovs\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.465561 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a0ace87-86aa-422c-88d3-fa3cde842197-var-run-ovn\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.465606 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c2066bcc-99df-41ac-9fb9-15320db7e342-var-run\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.465637 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c2066bcc-99df-41ac-9fb9-15320db7e342-var-log\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.465895 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kb89t\" (UniqueName: \"kubernetes.io/projected/c2066bcc-99df-41ac-9fb9-15320db7e342-kube-api-access-kb89t\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.466049 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phdd7\" (UniqueName: \"kubernetes.io/projected/1a0ace87-86aa-422c-88d3-fa3cde842197-kube-api-access-phdd7\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.466089 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a0ace87-86aa-422c-88d3-fa3cde842197-ovn-controller-tls-certs\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.466129 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a0ace87-86aa-422c-88d3-fa3cde842197-combined-ca-bundle\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.466177 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1a0ace87-86aa-422c-88d3-fa3cde842197-var-run\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.466279 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c2066bcc-99df-41ac-9fb9-15320db7e342-var-lib\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.466303 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1a0ace87-86aa-422c-88d3-fa3cde842197-var-log-ovn\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.466384 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a0ace87-86aa-422c-88d3-fa3cde842197-scripts\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.466574 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2066bcc-99df-41ac-9fb9-15320db7e342-scripts\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568448 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c2066bcc-99df-41ac-9fb9-15320db7e342-etc-ovs\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568495 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a0ace87-86aa-422c-88d3-fa3cde842197-var-run-ovn\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568532 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c2066bcc-99df-41ac-9fb9-15320db7e342-var-run\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568554 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c2066bcc-99df-41ac-9fb9-15320db7e342-var-log\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568602 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kb89t\" (UniqueName: \"kubernetes.io/projected/c2066bcc-99df-41ac-9fb9-15320db7e342-kube-api-access-kb89t\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568646 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phdd7\" (UniqueName: \"kubernetes.io/projected/1a0ace87-86aa-422c-88d3-fa3cde842197-kube-api-access-phdd7\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568671 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a0ace87-86aa-422c-88d3-fa3cde842197-ovn-controller-tls-certs\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568690 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a0ace87-86aa-422c-88d3-fa3cde842197-combined-ca-bundle\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568715 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1a0ace87-86aa-422c-88d3-fa3cde842197-var-run\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568739 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1a0ace87-86aa-422c-88d3-fa3cde842197-var-log-ovn\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568757 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c2066bcc-99df-41ac-9fb9-15320db7e342-var-lib\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568784 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a0ace87-86aa-422c-88d3-fa3cde842197-scripts\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568812 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c2066bcc-99df-41ac-9fb9-15320db7e342-etc-ovs\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568846 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2066bcc-99df-41ac-9fb9-15320db7e342-scripts\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568890 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1a0ace87-86aa-422c-88d3-fa3cde842197-var-run\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568901 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c2066bcc-99df-41ac-9fb9-15320db7e342-var-run\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568846 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c2066bcc-99df-41ac-9fb9-15320db7e342-var-log\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.568994 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1a0ace87-86aa-422c-88d3-fa3cde842197-var-log-ovn\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.569004 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c2066bcc-99df-41ac-9fb9-15320db7e342-var-lib\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.569845 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1a0ace87-86aa-422c-88d3-fa3cde842197-var-run-ovn\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.571073 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a0ace87-86aa-422c-88d3-fa3cde842197-scripts\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.571603 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c2066bcc-99df-41ac-9fb9-15320db7e342-scripts\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.575187 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a0ace87-86aa-422c-88d3-fa3cde842197-ovn-controller-tls-certs\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.576391 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a0ace87-86aa-422c-88d3-fa3cde842197-combined-ca-bundle\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.589264 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kb89t\" (UniqueName: \"kubernetes.io/projected/c2066bcc-99df-41ac-9fb9-15320db7e342-kube-api-access-kb89t\") pod \"ovn-controller-ovs-xbhld\" (UID: \"c2066bcc-99df-41ac-9fb9-15320db7e342\") " pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.589263 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phdd7\" (UniqueName: \"kubernetes.io/projected/1a0ace87-86aa-422c-88d3-fa3cde842197-kube-api-access-phdd7\") pod \"ovn-controller-8kh4g\" (UID: \"1a0ace87-86aa-422c-88d3-fa3cde842197\") " pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.633812 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d089d8f-788c-4673-857c-dfd4289325a2" path="/var/lib/kubelet/pods/0d089d8f-788c-4673-857c-dfd4289325a2/volumes"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.672151 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.692281 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.930819 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.993119 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8cm2\" (UniqueName: \"kubernetes.io/projected/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-kube-api-access-t8cm2\") pod \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\" (UID: \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\") "
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.993239 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-secret-volume\") pod \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\" (UID: \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\") "
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.993326 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-config-volume\") pod \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\" (UID: \"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d\") "
Nov 25 10:30:02 crc kubenswrapper[4932]: I1125 10:30:02.994708 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-config-volume" (OuterVolumeSpecName: "config-volume") pod "13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d" (UID: "13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.011479 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-kube-api-access-t8cm2" (OuterVolumeSpecName: "kube-api-access-t8cm2") pod "13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d" (UID: "13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d"). InnerVolumeSpecName "kube-api-access-t8cm2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.017385 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d" (UID: "13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.097150 4932 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.097207 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8cm2\" (UniqueName: \"kubernetes.io/projected/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-kube-api-access-t8cm2\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.097225 4932 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:03 crc kubenswrapper[4932]: W1125 10:30:03.209370 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1a0ace87_86aa_422c_88d3_fa3cde842197.slice/crio-88184c3acf62219798f0e992fc9608645c4a02fa39250bc3d348b914d0641fcb WatchSource:0}: Error finding container 88184c3acf62219798f0e992fc9608645c4a02fa39250bc3d348b914d0641fcb: Status 404 returned error can't find the container with id 88184c3acf62219798f0e992fc9608645c4a02fa39250bc3d348b914d0641fcb
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.212444 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8kh4g"]
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.559701 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-xbhld"]
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.602713 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xbhld" event={"ID":"c2066bcc-99df-41ac-9fb9-15320db7e342","Type":"ContainerStarted","Data":"33a62aa382441b112132de58a76c646b06fafde9c9f7def1400b5b44cb6fb5e3"}
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.607354 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.607401 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt" event={"ID":"13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d","Type":"ContainerDied","Data":"7f1895c762de47c253eb910bf483de0d51310854cf2d88cf635d982ed0ca7ac9"}
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.607430 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f1895c762de47c253eb910bf483de0d51310854cf2d88cf635d982ed0ca7ac9"
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.611041 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8kh4g" event={"ID":"1a0ace87-86aa-422c-88d3-fa3cde842197","Type":"ContainerStarted","Data":"9d2aaaff21aebd08272a4caa951da0bfe24e2e5421a196372b92b3410089a0fc"}
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.611109 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8kh4g" event={"ID":"1a0ace87-86aa-422c-88d3-fa3cde842197","Type":"ContainerStarted","Data":"88184c3acf62219798f0e992fc9608645c4a02fa39250bc3d348b914d0641fcb"}
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.611160 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-8kh4g"
Nov 25 10:30:03 crc kubenswrapper[4932]: E1125 10:30:03.740370 4932 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod13e9c5b3_f6b9_40f1_8ea3_8dc040e6960d.slice\": RecentStats: unable to find data in memory cache]"
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.898026 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-8kh4g" podStartSLOduration=1.898005186 podStartE2EDuration="1.898005186s" podCreationTimestamp="2025-11-25 10:30:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:30:03.636855514 +0000 UTC m=+6063.762885077" watchObservedRunningTime="2025-11-25 10:30:03.898005186 +0000 UTC m=+6064.024034749"
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.908867 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-8jp7z"]
Nov 25 10:30:03 crc kubenswrapper[4932]: E1125 10:30:03.909393 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d" containerName="collect-profiles"
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.909412 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d" containerName="collect-profiles"
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.909610 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d" containerName="collect-profiles"
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.910415 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.925098 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 25 10:30:03 crc kubenswrapper[4932]: I1125 10:30:03.930704 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-8jp7z"]
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.003026 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk"]
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.015914 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401065-mb8gk"]
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.016515 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.016580 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-ovn-rundir\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.016625 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-combined-ca-bundle\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.016686 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-config\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.017140 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tk4l8\" (UniqueName: \"kubernetes.io/projected/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-kube-api-access-tk4l8\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.017378 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-ovs-rundir\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.119228 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.119282 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-ovn-rundir\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.119311 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-combined-ca-bundle\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.119356 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-config\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.119407 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tk4l8\" (UniqueName: \"kubernetes.io/projected/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-kube-api-access-tk4l8\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.119494 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-ovs-rundir\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.119634 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-ovs-rundir\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.119634 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-ovn-rundir\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.120314 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-config\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.125434 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-combined-ca-bundle\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.134006 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.136974 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tk4l8\" (UniqueName: \"kubernetes.io/projected/b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4-kube-api-access-tk4l8\") pod \"ovn-controller-metrics-8jp7z\" (UID: \"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4\") " pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.551047 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-8jp7z"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.637796 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79047c8e-3944-490c-bc62-61352910d301" path="/var/lib/kubelet/pods/79047c8e-3944-490c-bc62-61352910d301/volumes"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.644093 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xbhld" event={"ID":"c2066bcc-99df-41ac-9fb9-15320db7e342","Type":"ContainerStarted","Data":"0fe0bc255a8cdf62fc1fadb8a56c1c013a361151cfe7e194866dde4e7fae3fe2"}
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.699410 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-create-jvzv8"]
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.701766 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-jvzv8"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.709776 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-jvzv8"]
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.757516 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rg8sh\" (UniqueName: \"kubernetes.io/projected/fb5c2557-70fa-42c9-b3c1-739a5a34a558-kube-api-access-rg8sh\") pod \"octavia-db-create-jvzv8\" (UID: \"fb5c2557-70fa-42c9-b3c1-739a5a34a558\") " pod="openstack/octavia-db-create-jvzv8"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.757592 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb5c2557-70fa-42c9-b3c1-739a5a34a558-operator-scripts\") pod \"octavia-db-create-jvzv8\" (UID: \"fb5c2557-70fa-42c9-b3c1-739a5a34a558\") " pod="openstack/octavia-db-create-jvzv8"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.858919 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rg8sh\" (UniqueName: \"kubernetes.io/projected/fb5c2557-70fa-42c9-b3c1-739a5a34a558-kube-api-access-rg8sh\") pod \"octavia-db-create-jvzv8\" (UID: \"fb5c2557-70fa-42c9-b3c1-739a5a34a558\") " pod="openstack/octavia-db-create-jvzv8"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.858987 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb5c2557-70fa-42c9-b3c1-739a5a34a558-operator-scripts\") pod \"octavia-db-create-jvzv8\" (UID: \"fb5c2557-70fa-42c9-b3c1-739a5a34a558\") " pod="openstack/octavia-db-create-jvzv8"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.859905 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb5c2557-70fa-42c9-b3c1-739a5a34a558-operator-scripts\") pod \"octavia-db-create-jvzv8\" (UID: \"fb5c2557-70fa-42c9-b3c1-739a5a34a558\") " pod="openstack/octavia-db-create-jvzv8"
Nov 25 10:30:04 crc kubenswrapper[4932]: I1125 10:30:04.888730 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rg8sh\" (UniqueName: \"kubernetes.io/projected/fb5c2557-70fa-42c9-b3c1-739a5a34a558-kube-api-access-rg8sh\") pod \"octavia-db-create-jvzv8\" (UID: \"fb5c2557-70fa-42c9-b3c1-739a5a34a558\") " pod="openstack/octavia-db-create-jvzv8"
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.047663 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-jvzv8"
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.097108 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-8jp7z"]
Nov 25 10:30:05 crc kubenswrapper[4932]: W1125 10:30:05.106812 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1b72cd0_93ce_4e9c_b2a2_cd17601f3ab4.slice/crio-7c8d659c18cb65f666a8f2afcc25fbfb9a53d003a2d30ae22fba782400ea6f5c WatchSource:0}: Error finding container 7c8d659c18cb65f666a8f2afcc25fbfb9a53d003a2d30ae22fba782400ea6f5c: Status 404 returned error can't find the container with id 7c8d659c18cb65f666a8f2afcc25fbfb9a53d003a2d30ae22fba782400ea6f5c
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.499817 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-jvzv8"]
Nov 25 10:30:05 crc kubenswrapper[4932]: W1125 10:30:05.503486 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb5c2557_70fa_42c9_b3c1_739a5a34a558.slice/crio-82cc4b43a357b58ff51d4cfaeeec4dc6f8d27efd8f75611ebbb30d29dee94cee WatchSource:0}: Error finding container 82cc4b43a357b58ff51d4cfaeeec4dc6f8d27efd8f75611ebbb30d29dee94cee: Status 404 returned error can't find the container with id 82cc4b43a357b58ff51d4cfaeeec4dc6f8d27efd8f75611ebbb30d29dee94cee
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.661805 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-jvzv8" event={"ID":"fb5c2557-70fa-42c9-b3c1-739a5a34a558","Type":"ContainerStarted","Data":"248889e36b5e6688aa0c60593add36d74d08b73d7d739712cae25f5b5a0d4520"}
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.662131 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-jvzv8" event={"ID":"fb5c2557-70fa-42c9-b3c1-739a5a34a558","Type":"ContainerStarted","Data":"82cc4b43a357b58ff51d4cfaeeec4dc6f8d27efd8f75611ebbb30d29dee94cee"}
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.663541 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-8jp7z" event={"ID":"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4","Type":"ContainerStarted","Data":"a7f609ced59fc9d02164c3a7aef901b312b40c0c607f8d17531e972f24010cf6"}
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.663592 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-8jp7z" event={"ID":"b1b72cd0-93ce-4e9c-b2a2-cd17601f3ab4","Type":"ContainerStarted","Data":"7c8d659c18cb65f666a8f2afcc25fbfb9a53d003a2d30ae22fba782400ea6f5c"}
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.668599 4932 generic.go:334] "Generic (PLEG): container finished" podID="c2066bcc-99df-41ac-9fb9-15320db7e342" containerID="0fe0bc255a8cdf62fc1fadb8a56c1c013a361151cfe7e194866dde4e7fae3fe2" exitCode=0
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.668642 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xbhld" event={"ID":"c2066bcc-99df-41ac-9fb9-15320db7e342","Type":"ContainerDied","Data":"0fe0bc255a8cdf62fc1fadb8a56c1c013a361151cfe7e194866dde4e7fae3fe2"}
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.688893 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-db-create-jvzv8" podStartSLOduration=1.688871615 podStartE2EDuration="1.688871615s" podCreationTimestamp="2025-11-25 10:30:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:30:05.675249915 +0000 UTC m=+6065.801279488" watchObservedRunningTime="2025-11-25 10:30:05.688871615 +0000 UTC m=+6065.814901178"
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.742338 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-f71b-account-create-jpl8d"]
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.744320 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-f71b-account-create-jpl8d"
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.747070 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-db-secret"
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.773673 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-8jp7z" podStartSLOduration=2.773653351 podStartE2EDuration="2.773653351s" podCreationTimestamp="2025-11-25 10:30:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:30:05.719598484 +0000 UTC m=+6065.845628057" watchObservedRunningTime="2025-11-25 10:30:05.773653351 +0000 UTC m=+6065.899682914"
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.774914 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-f71b-account-create-jpl8d"]
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.878847 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dh6xc\" (UniqueName: \"kubernetes.io/projected/9a7eaed6-9c4a-4070-92b9-438cf5907c7e-kube-api-access-dh6xc\") pod \"octavia-f71b-account-create-jpl8d\" (UID: \"9a7eaed6-9c4a-4070-92b9-438cf5907c7e\") " pod="openstack/octavia-f71b-account-create-jpl8d"
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.878918 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a7eaed6-9c4a-4070-92b9-438cf5907c7e-operator-scripts\") pod \"octavia-f71b-account-create-jpl8d\" (UID: \"9a7eaed6-9c4a-4070-92b9-438cf5907c7e\") " pod="openstack/octavia-f71b-account-create-jpl8d"
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.980130 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dh6xc\" (UniqueName: \"kubernetes.io/projected/9a7eaed6-9c4a-4070-92b9-438cf5907c7e-kube-api-access-dh6xc\") pod \"octavia-f71b-account-create-jpl8d\" (UID: \"9a7eaed6-9c4a-4070-92b9-438cf5907c7e\") " pod="openstack/octavia-f71b-account-create-jpl8d"
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.980228 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a7eaed6-9c4a-4070-92b9-438cf5907c7e-operator-scripts\") pod \"octavia-f71b-account-create-jpl8d\" (UID: \"9a7eaed6-9c4a-4070-92b9-438cf5907c7e\") " pod="openstack/octavia-f71b-account-create-jpl8d"
Nov 25 10:30:05 crc kubenswrapper[4932]: I1125 10:30:05.980950 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a7eaed6-9c4a-4070-92b9-438cf5907c7e-operator-scripts\") pod \"octavia-f71b-account-create-jpl8d\" (UID: \"9a7eaed6-9c4a-4070-92b9-438cf5907c7e\") " pod="openstack/octavia-f71b-account-create-jpl8d"
Nov 25 10:30:06 crc kubenswrapper[4932]: I1125 10:30:06.009075 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dh6xc\" (UniqueName: \"kubernetes.io/projected/9a7eaed6-9c4a-4070-92b9-438cf5907c7e-kube-api-access-dh6xc\") pod \"octavia-f71b-account-create-jpl8d\" (UID: \"9a7eaed6-9c4a-4070-92b9-438cf5907c7e\") " pod="openstack/octavia-f71b-account-create-jpl8d"
Nov 25 10:30:06 crc kubenswrapper[4932]: I1125 10:30:06.078996 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-f71b-account-create-jpl8d"
Nov 25 10:30:06 crc kubenswrapper[4932]: I1125 10:30:06.569411 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-f71b-account-create-jpl8d"]
Nov 25 10:30:06 crc kubenswrapper[4932]: W1125 10:30:06.571846 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a7eaed6_9c4a_4070_92b9_438cf5907c7e.slice/crio-23c885d27a3acc83fb83896ce6b12bd49bb3e56c78efae071a5bafed0688bf43 WatchSource:0}: Error finding container 23c885d27a3acc83fb83896ce6b12bd49bb3e56c78efae071a5bafed0688bf43: Status 404 returned error can't find the container with id 23c885d27a3acc83fb83896ce6b12bd49bb3e56c78efae071a5bafed0688bf43
Nov 25 10:30:06 crc kubenswrapper[4932]: I1125 10:30:06.680071 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-f71b-account-create-jpl8d" event={"ID":"9a7eaed6-9c4a-4070-92b9-438cf5907c7e","Type":"ContainerStarted","Data":"23c885d27a3acc83fb83896ce6b12bd49bb3e56c78efae071a5bafed0688bf43"}
Nov 25 10:30:06 crc kubenswrapper[4932]: I1125 10:30:06.686138 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xbhld" event={"ID":"c2066bcc-99df-41ac-9fb9-15320db7e342","Type":"ContainerStarted","Data":"95d2dfde7bd517ac241e8ad16115b4408d54b3c21755ab9a812e7b92def165e0"}
Nov 25 10:30:06 crc kubenswrapper[4932]: I1125 10:30:06.686243 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xbhld" event={"ID":"c2066bcc-99df-41ac-9fb9-15320db7e342","Type":"ContainerStarted","Data":"9ad4f8da34abca7e22fe3d3f0aaf07dcc024a7e3e118ef955c67fcb3d4fe8fad"}
Nov 25 10:30:06 crc kubenswrapper[4932]: I1125 10:30:06.686286 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:06 crc kubenswrapper[4932]: I1125 10:30:06.688512 4932 generic.go:334] "Generic (PLEG): container finished" podID="fb5c2557-70fa-42c9-b3c1-739a5a34a558" containerID="248889e36b5e6688aa0c60593add36d74d08b73d7d739712cae25f5b5a0d4520" exitCode=0
Nov 25 10:30:06 crc kubenswrapper[4932]: I1125 10:30:06.688549 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-jvzv8" event={"ID":"fb5c2557-70fa-42c9-b3c1-739a5a34a558","Type":"ContainerDied","Data":"248889e36b5e6688aa0c60593add36d74d08b73d7d739712cae25f5b5a0d4520"}
Nov 25 10:30:06 crc kubenswrapper[4932]: I1125 10:30:06.714443 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-xbhld" podStartSLOduration=4.714424468 podStartE2EDuration="4.714424468s" podCreationTimestamp="2025-11-25 10:30:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:30:06.710684171 +0000 UTC m=+6066.836713734" watchObservedRunningTime="2025-11-25 10:30:06.714424468 +0000 UTC m=+6066.840454041"
Nov 25 10:30:07 crc kubenswrapper[4932]: I1125 10:30:07.692919 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:07 crc kubenswrapper[4932]: I1125 10:30:07.699419 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xbhld_c2066bcc-99df-41ac-9fb9-15320db7e342/ovs-vswitchd/0.log"
Nov 25 10:30:07 crc kubenswrapper[4932]: I1125 10:30:07.700207 4932 generic.go:334] "Generic (PLEG): container finished" podID="c2066bcc-99df-41ac-9fb9-15320db7e342" containerID="95d2dfde7bd517ac241e8ad16115b4408d54b3c21755ab9a812e7b92def165e0" exitCode=1
Nov 25 10:30:07 crc kubenswrapper[4932]: I1125 10:30:07.700255 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xbhld" event={"ID":"c2066bcc-99df-41ac-9fb9-15320db7e342","Type":"ContainerDied","Data":"95d2dfde7bd517ac241e8ad16115b4408d54b3c21755ab9a812e7b92def165e0"}
Nov 25 10:30:07 crc kubenswrapper[4932]: I1125 10:30:07.701048 4932 scope.go:117] "RemoveContainer" containerID="95d2dfde7bd517ac241e8ad16115b4408d54b3c21755ab9a812e7b92def165e0"
Nov 25 10:30:07 crc kubenswrapper[4932]: I1125 10:30:07.701858 4932 generic.go:334] "Generic (PLEG): container finished" podID="9a7eaed6-9c4a-4070-92b9-438cf5907c7e" containerID="69196289d0e87f4d54b9f63b017f3883215ba42a950074a71af424266d39e925" exitCode=0
Nov 25 10:30:07 crc kubenswrapper[4932]: I1125 10:30:07.701908 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-f71b-account-create-jpl8d" event={"ID":"9a7eaed6-9c4a-4070-92b9-438cf5907c7e","Type":"ContainerDied","Data":"69196289d0e87f4d54b9f63b017f3883215ba42a950074a71af424266d39e925"}
Nov 25 10:30:08 crc kubenswrapper[4932]: I1125 10:30:08.072757 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-jvzv8"
Nov 25 10:30:08 crc kubenswrapper[4932]: I1125 10:30:08.224173 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rg8sh\" (UniqueName: \"kubernetes.io/projected/fb5c2557-70fa-42c9-b3c1-739a5a34a558-kube-api-access-rg8sh\") pod \"fb5c2557-70fa-42c9-b3c1-739a5a34a558\" (UID: \"fb5c2557-70fa-42c9-b3c1-739a5a34a558\") "
Nov 25 10:30:08 crc kubenswrapper[4932]: I1125 10:30:08.224280 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb5c2557-70fa-42c9-b3c1-739a5a34a558-operator-scripts\") pod \"fb5c2557-70fa-42c9-b3c1-739a5a34a558\" (UID: \"fb5c2557-70fa-42c9-b3c1-739a5a34a558\") "
Nov 25 10:30:08 crc kubenswrapper[4932]: I1125 10:30:08.224924 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb5c2557-70fa-42c9-b3c1-739a5a34a558-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fb5c2557-70fa-42c9-b3c1-739a5a34a558" (UID: "fb5c2557-70fa-42c9-b3c1-739a5a34a558"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:30:08 crc kubenswrapper[4932]: I1125 10:30:08.233177 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb5c2557-70fa-42c9-b3c1-739a5a34a558-kube-api-access-rg8sh" (OuterVolumeSpecName: "kube-api-access-rg8sh") pod "fb5c2557-70fa-42c9-b3c1-739a5a34a558" (UID: "fb5c2557-70fa-42c9-b3c1-739a5a34a558"). InnerVolumeSpecName "kube-api-access-rg8sh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:30:08 crc kubenswrapper[4932]: I1125 10:30:08.326722 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rg8sh\" (UniqueName: \"kubernetes.io/projected/fb5c2557-70fa-42c9-b3c1-739a5a34a558-kube-api-access-rg8sh\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:08 crc kubenswrapper[4932]: I1125 10:30:08.326759 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb5c2557-70fa-42c9-b3c1-739a5a34a558-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:08 crc kubenswrapper[4932]: I1125 10:30:08.722164 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xbhld_c2066bcc-99df-41ac-9fb9-15320db7e342/ovs-vswitchd/0.log"
Nov 25 10:30:08 crc kubenswrapper[4932]: I1125 10:30:08.724616 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xbhld" event={"ID":"c2066bcc-99df-41ac-9fb9-15320db7e342","Type":"ContainerStarted","Data":"9fdb08287a0882873fe430e3bbf2c7622892810283720b6e4b282ca0350d0595"}
Nov 25 10:30:08 crc kubenswrapper[4932]: I1125 10:30:08.727041 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-jvzv8" event={"ID":"fb5c2557-70fa-42c9-b3c1-739a5a34a558","Type":"ContainerDied","Data":"82cc4b43a357b58ff51d4cfaeeec4dc6f8d27efd8f75611ebbb30d29dee94cee"}
Nov 25 10:30:08 crc kubenswrapper[4932]: I1125 10:30:08.727468 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82cc4b43a357b58ff51d4cfaeeec4dc6f8d27efd8f75611ebbb30d29dee94cee"
Nov 25 10:30:08 crc kubenswrapper[4932]: I1125 10:30:08.727086 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-jvzv8"
Nov 25 10:30:09 crc kubenswrapper[4932]: I1125 10:30:09.082869 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-f71b-account-create-jpl8d"
Nov 25 10:30:09 crc kubenswrapper[4932]: I1125 10:30:09.247419 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a7eaed6-9c4a-4070-92b9-438cf5907c7e-operator-scripts\") pod \"9a7eaed6-9c4a-4070-92b9-438cf5907c7e\" (UID: \"9a7eaed6-9c4a-4070-92b9-438cf5907c7e\") "
Nov 25 10:30:09 crc kubenswrapper[4932]: I1125 10:30:09.248024 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dh6xc\" (UniqueName: \"kubernetes.io/projected/9a7eaed6-9c4a-4070-92b9-438cf5907c7e-kube-api-access-dh6xc\") pod \"9a7eaed6-9c4a-4070-92b9-438cf5907c7e\" (UID: \"9a7eaed6-9c4a-4070-92b9-438cf5907c7e\") "
Nov 25 10:30:09 crc kubenswrapper[4932]: I1125 10:30:09.250445 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a7eaed6-9c4a-4070-92b9-438cf5907c7e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9a7eaed6-9c4a-4070-92b9-438cf5907c7e" (UID: "9a7eaed6-9c4a-4070-92b9-438cf5907c7e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:30:09 crc kubenswrapper[4932]: I1125 10:30:09.255560 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a7eaed6-9c4a-4070-92b9-438cf5907c7e-kube-api-access-dh6xc" (OuterVolumeSpecName: "kube-api-access-dh6xc") pod "9a7eaed6-9c4a-4070-92b9-438cf5907c7e" (UID: "9a7eaed6-9c4a-4070-92b9-438cf5907c7e"). InnerVolumeSpecName "kube-api-access-dh6xc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:30:09 crc kubenswrapper[4932]: I1125 10:30:09.350493 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dh6xc\" (UniqueName: \"kubernetes.io/projected/9a7eaed6-9c4a-4070-92b9-438cf5907c7e-kube-api-access-dh6xc\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:09 crc kubenswrapper[4932]: I1125 10:30:09.350557 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9a7eaed6-9c4a-4070-92b9-438cf5907c7e-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:30:09 crc kubenswrapper[4932]: I1125 10:30:09.739574 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-f71b-account-create-jpl8d" event={"ID":"9a7eaed6-9c4a-4070-92b9-438cf5907c7e","Type":"ContainerDied","Data":"23c885d27a3acc83fb83896ce6b12bd49bb3e56c78efae071a5bafed0688bf43"}
Nov 25 10:30:09 crc kubenswrapper[4932]: I1125 10:30:09.739609 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-f71b-account-create-jpl8d"
Nov 25 10:30:09 crc kubenswrapper[4932]: I1125 10:30:09.739637 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23c885d27a3acc83fb83896ce6b12bd49bb3e56c78efae071a5bafed0688bf43"
Nov 25 10:30:09 crc kubenswrapper[4932]: I1125 10:30:09.740148 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-xbhld"
Nov 25 10:30:10 crc kubenswrapper[4932]: I1125 10:30:10.517538 4932 scope.go:117] "RemoveContainer" containerID="5fe6cbde826d535523e9144110263db39acc9943edb9fb665a14a696f13b6318"
Nov 25 10:30:10 crc kubenswrapper[4932]: I1125 10:30:10.567959 4932 scope.go:117] "RemoveContainer" containerID="623eaf68f11fb2a6869720b19ed2e51efe0e8e7ca6d07661393cef4a60309b00"
Nov 25 10:30:10 crc kubenswrapper[4932]: I1125 10:30:10.599811 4932 scope.go:117] "RemoveContainer" containerID="0db22cda3dbbcc647d0df03bb1266e5a0e8631f338f508e9793662b80f0265bd"
Nov 25 10:30:10 crc kubenswrapper[4932]: I1125 10:30:10.669853 4932 scope.go:117] "RemoveContainer" containerID="88d49f8d236adada4de921553e9c57d3d2dcb4e14fbb0aed8996708b09e3c744"
Nov 25 10:30:10 crc kubenswrapper[4932]: I1125 10:30:10.693709 4932 scope.go:117] "RemoveContainer" containerID="d6660d5283c98153e44d37a76316ede92a598cf08fb655b0ece3802a81efb530"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.039160 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-persistence-db-create-pr4w7"]
Nov 25 10:30:11 crc kubenswrapper[4932]: E1125 10:30:11.039973 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb5c2557-70fa-42c9-b3c1-739a5a34a558" containerName="mariadb-database-create"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.040003 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb5c2557-70fa-42c9-b3c1-739a5a34a558" containerName="mariadb-database-create"
Nov 25 10:30:11 crc kubenswrapper[4932]: E1125 10:30:11.040020 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a7eaed6-9c4a-4070-92b9-438cf5907c7e" containerName="mariadb-account-create"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.040027 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a7eaed6-9c4a-4070-92b9-438cf5907c7e" containerName="mariadb-account-create"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.040276 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a7eaed6-9c4a-4070-92b9-438cf5907c7e" containerName="mariadb-account-create"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.040302 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb5c2557-70fa-42c9-b3c1-739a5a34a558" containerName="mariadb-database-create"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.041369 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-pr4w7"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.048960 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-pr4w7"]
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.090598 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9cnj\" (UniqueName: \"kubernetes.io/projected/16444bcb-2d63-4893-9ced-b0a5375eb0de-kube-api-access-q9cnj\") pod \"octavia-persistence-db-create-pr4w7\" (UID: \"16444bcb-2d63-4893-9ced-b0a5375eb0de\") " pod="openstack/octavia-persistence-db-create-pr4w7"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.090720 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/16444bcb-2d63-4893-9ced-b0a5375eb0de-operator-scripts\") pod \"octavia-persistence-db-create-pr4w7\" (UID: \"16444bcb-2d63-4893-9ced-b0a5375eb0de\") " pod="openstack/octavia-persistence-db-create-pr4w7"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.192868 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/16444bcb-2d63-4893-9ced-b0a5375eb0de-operator-scripts\") pod \"octavia-persistence-db-create-pr4w7\" (UID: \"16444bcb-2d63-4893-9ced-b0a5375eb0de\") " pod="openstack/octavia-persistence-db-create-pr4w7"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.193011 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9cnj\" (UniqueName: \"kubernetes.io/projected/16444bcb-2d63-4893-9ced-b0a5375eb0de-kube-api-access-q9cnj\") pod \"octavia-persistence-db-create-pr4w7\" (UID: \"16444bcb-2d63-4893-9ced-b0a5375eb0de\") " pod="openstack/octavia-persistence-db-create-pr4w7"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.193899 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/16444bcb-2d63-4893-9ced-b0a5375eb0de-operator-scripts\") pod \"octavia-persistence-db-create-pr4w7\" (UID: \"16444bcb-2d63-4893-9ced-b0a5375eb0de\") " pod="openstack/octavia-persistence-db-create-pr4w7"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.211529 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9cnj\" (UniqueName: \"kubernetes.io/projected/16444bcb-2d63-4893-9ced-b0a5375eb0de-kube-api-access-q9cnj\") pod \"octavia-persistence-db-create-pr4w7\" (UID: \"16444bcb-2d63-4893-9ced-b0a5375eb0de\") " pod="openstack/octavia-persistence-db-create-pr4w7"
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.364735 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-pr4w7"
Nov 25 10:30:11 crc kubenswrapper[4932]: W1125 10:30:11.862879 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16444bcb_2d63_4893_9ced_b0a5375eb0de.slice/crio-714183be24a5030f69e20d01768f911e0e9f89d62cc7648187bdbb6b3f6785d2 WatchSource:0}: Error finding container 714183be24a5030f69e20d01768f911e0e9f89d62cc7648187bdbb6b3f6785d2: Status 404 returned error can't find the container with id 714183be24a5030f69e20d01768f911e0e9f89d62cc7648187bdbb6b3f6785d2
Nov 25 10:30:11 crc kubenswrapper[4932]: I1125 10:30:11.866290 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-pr4w7"]
Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.440058 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-3074-account-create-rlmqt"]
Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.442569 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-3074-account-create-rlmqt"
Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.444527 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-persistence-db-secret"
Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.448646 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-3074-account-create-rlmqt"]
Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.524078 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwl86\" (UniqueName: \"kubernetes.io/projected/bac5b437-403a-4a3e-a84e-163690732ab3-kube-api-access-zwl86\") pod \"octavia-3074-account-create-rlmqt\" (UID: \"bac5b437-403a-4a3e-a84e-163690732ab3\") " pod="openstack/octavia-3074-account-create-rlmqt"
Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.524402 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bac5b437-403a-4a3e-a84e-163690732ab3-operator-scripts\") pod \"octavia-3074-account-create-rlmqt\" (UID: \"bac5b437-403a-4a3e-a84e-163690732ab3\") " pod="openstack/octavia-3074-account-create-rlmqt"
Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.625420 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwl86\" (UniqueName: \"kubernetes.io/projected/bac5b437-403a-4a3e-a84e-163690732ab3-kube-api-access-zwl86\") pod \"octavia-3074-account-create-rlmqt\" (UID: \"bac5b437-403a-4a3e-a84e-163690732ab3\") " pod="openstack/octavia-3074-account-create-rlmqt"
Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.625541 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bac5b437-403a-4a3e-a84e-163690732ab3-operator-scripts\") pod \"octavia-3074-account-create-rlmqt\" (UID: \"bac5b437-403a-4a3e-a84e-163690732ab3\") " pod="openstack/octavia-3074-account-create-rlmqt"
Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.626431 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName:
\"kubernetes.io/configmap/bac5b437-403a-4a3e-a84e-163690732ab3-operator-scripts\") pod \"octavia-3074-account-create-rlmqt\" (UID: \"bac5b437-403a-4a3e-a84e-163690732ab3\") " pod="openstack/octavia-3074-account-create-rlmqt" Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.646421 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwl86\" (UniqueName: \"kubernetes.io/projected/bac5b437-403a-4a3e-a84e-163690732ab3-kube-api-access-zwl86\") pod \"octavia-3074-account-create-rlmqt\" (UID: \"bac5b437-403a-4a3e-a84e-163690732ab3\") " pod="openstack/octavia-3074-account-create-rlmqt" Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.774576 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-3074-account-create-rlmqt" Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.789280 4932 generic.go:334] "Generic (PLEG): container finished" podID="16444bcb-2d63-4893-9ced-b0a5375eb0de" containerID="c6916dc69497bd79d14d72ce57febec6f4a3e3d687336380e228671fa6100620" exitCode=0 Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.789323 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-pr4w7" event={"ID":"16444bcb-2d63-4893-9ced-b0a5375eb0de","Type":"ContainerDied","Data":"c6916dc69497bd79d14d72ce57febec6f4a3e3d687336380e228671fa6100620"} Nov 25 10:30:12 crc kubenswrapper[4932]: I1125 10:30:12.789358 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-pr4w7" event={"ID":"16444bcb-2d63-4893-9ced-b0a5375eb0de","Type":"ContainerStarted","Data":"714183be24a5030f69e20d01768f911e0e9f89d62cc7648187bdbb6b3f6785d2"} Nov 25 10:30:13 crc kubenswrapper[4932]: I1125 10:30:13.213831 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-3074-account-create-rlmqt"] Nov 25 10:30:13 crc kubenswrapper[4932]: W1125 10:30:13.217956 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbac5b437_403a_4a3e_a84e_163690732ab3.slice/crio-971605bee6789a1f35d6b6e21a2de15650d93b190d73044747e9351087853a0b WatchSource:0}: Error finding container 971605bee6789a1f35d6b6e21a2de15650d93b190d73044747e9351087853a0b: Status 404 returned error can't find the container with id 971605bee6789a1f35d6b6e21a2de15650d93b190d73044747e9351087853a0b Nov 25 10:30:13 crc kubenswrapper[4932]: I1125 10:30:13.797933 4932 generic.go:334] "Generic (PLEG): container finished" podID="bac5b437-403a-4a3e-a84e-163690732ab3" containerID="4941c3d95449ab909f380a85fedb642939a5ad3dc11f8d150ef802dd61a46216" exitCode=0 Nov 25 10:30:13 crc kubenswrapper[4932]: I1125 10:30:13.798986 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-3074-account-create-rlmqt" event={"ID":"bac5b437-403a-4a3e-a84e-163690732ab3","Type":"ContainerDied","Data":"4941c3d95449ab909f380a85fedb642939a5ad3dc11f8d150ef802dd61a46216"} Nov 25 10:30:13 crc kubenswrapper[4932]: I1125 10:30:13.799012 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-3074-account-create-rlmqt" event={"ID":"bac5b437-403a-4a3e-a84e-163690732ab3","Type":"ContainerStarted","Data":"971605bee6789a1f35d6b6e21a2de15650d93b190d73044747e9351087853a0b"} Nov 25 10:30:14 crc kubenswrapper[4932]: I1125 10:30:14.153986 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-pr4w7" Nov 25 10:30:14 crc kubenswrapper[4932]: I1125 10:30:14.263499 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9cnj\" (UniqueName: \"kubernetes.io/projected/16444bcb-2d63-4893-9ced-b0a5375eb0de-kube-api-access-q9cnj\") pod \"16444bcb-2d63-4893-9ced-b0a5375eb0de\" (UID: \"16444bcb-2d63-4893-9ced-b0a5375eb0de\") " Nov 25 10:30:14 crc kubenswrapper[4932]: I1125 10:30:14.263840 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/16444bcb-2d63-4893-9ced-b0a5375eb0de-operator-scripts\") pod \"16444bcb-2d63-4893-9ced-b0a5375eb0de\" (UID: \"16444bcb-2d63-4893-9ced-b0a5375eb0de\") " Nov 25 10:30:14 crc kubenswrapper[4932]: I1125 10:30:14.264503 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/16444bcb-2d63-4893-9ced-b0a5375eb0de-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "16444bcb-2d63-4893-9ced-b0a5375eb0de" (UID: "16444bcb-2d63-4893-9ced-b0a5375eb0de"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:30:14 crc kubenswrapper[4932]: I1125 10:30:14.269928 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16444bcb-2d63-4893-9ced-b0a5375eb0de-kube-api-access-q9cnj" (OuterVolumeSpecName: "kube-api-access-q9cnj") pod "16444bcb-2d63-4893-9ced-b0a5375eb0de" (UID: "16444bcb-2d63-4893-9ced-b0a5375eb0de"). InnerVolumeSpecName "kube-api-access-q9cnj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:30:14 crc kubenswrapper[4932]: I1125 10:30:14.366544 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9cnj\" (UniqueName: \"kubernetes.io/projected/16444bcb-2d63-4893-9ced-b0a5375eb0de-kube-api-access-q9cnj\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:14 crc kubenswrapper[4932]: I1125 10:30:14.366586 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/16444bcb-2d63-4893-9ced-b0a5375eb0de-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:14 crc kubenswrapper[4932]: I1125 10:30:14.808834 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-pr4w7" event={"ID":"16444bcb-2d63-4893-9ced-b0a5375eb0de","Type":"ContainerDied","Data":"714183be24a5030f69e20d01768f911e0e9f89d62cc7648187bdbb6b3f6785d2"} Nov 25 10:30:14 crc kubenswrapper[4932]: I1125 10:30:14.808880 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="714183be24a5030f69e20d01768f911e0e9f89d62cc7648187bdbb6b3f6785d2" Nov 25 10:30:14 crc kubenswrapper[4932]: I1125 10:30:14.808906 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-pr4w7" Nov 25 10:30:15 crc kubenswrapper[4932]: I1125 10:30:15.150246 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-3074-account-create-rlmqt" Nov 25 10:30:15 crc kubenswrapper[4932]: I1125 10:30:15.285636 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwl86\" (UniqueName: \"kubernetes.io/projected/bac5b437-403a-4a3e-a84e-163690732ab3-kube-api-access-zwl86\") pod \"bac5b437-403a-4a3e-a84e-163690732ab3\" (UID: \"bac5b437-403a-4a3e-a84e-163690732ab3\") " Nov 25 10:30:15 crc kubenswrapper[4932]: I1125 10:30:15.285848 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bac5b437-403a-4a3e-a84e-163690732ab3-operator-scripts\") pod \"bac5b437-403a-4a3e-a84e-163690732ab3\" (UID: \"bac5b437-403a-4a3e-a84e-163690732ab3\") " Nov 25 10:30:15 crc kubenswrapper[4932]: I1125 10:30:15.286874 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bac5b437-403a-4a3e-a84e-163690732ab3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bac5b437-403a-4a3e-a84e-163690732ab3" (UID: "bac5b437-403a-4a3e-a84e-163690732ab3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:30:15 crc kubenswrapper[4932]: I1125 10:30:15.291931 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bac5b437-403a-4a3e-a84e-163690732ab3-kube-api-access-zwl86" (OuterVolumeSpecName: "kube-api-access-zwl86") pod "bac5b437-403a-4a3e-a84e-163690732ab3" (UID: "bac5b437-403a-4a3e-a84e-163690732ab3"). InnerVolumeSpecName "kube-api-access-zwl86". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:30:15 crc kubenswrapper[4932]: I1125 10:30:15.389054 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bac5b437-403a-4a3e-a84e-163690732ab3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:15 crc kubenswrapper[4932]: I1125 10:30:15.389119 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwl86\" (UniqueName: \"kubernetes.io/projected/bac5b437-403a-4a3e-a84e-163690732ab3-kube-api-access-zwl86\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:15 crc kubenswrapper[4932]: I1125 10:30:15.819762 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-3074-account-create-rlmqt" event={"ID":"bac5b437-403a-4a3e-a84e-163690732ab3","Type":"ContainerDied","Data":"971605bee6789a1f35d6b6e21a2de15650d93b190d73044747e9351087853a0b"} Nov 25 10:30:15 crc kubenswrapper[4932]: I1125 10:30:15.820098 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="971605bee6789a1f35d6b6e21a2de15650d93b190d73044747e9351087853a0b" Nov 25 10:30:15 crc kubenswrapper[4932]: I1125 10:30:15.819821 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-3074-account-create-rlmqt" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.736064 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-7b799f9ff6-tshqq"] Nov 25 10:30:18 crc kubenswrapper[4932]: E1125 10:30:18.736712 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16444bcb-2d63-4893-9ced-b0a5375eb0de" containerName="mariadb-database-create" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.736724 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="16444bcb-2d63-4893-9ced-b0a5375eb0de" containerName="mariadb-database-create" Nov 25 10:30:18 crc kubenswrapper[4932]: E1125 10:30:18.736768 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bac5b437-403a-4a3e-a84e-163690732ab3" containerName="mariadb-account-create" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.736779 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="bac5b437-403a-4a3e-a84e-163690732ab3" containerName="mariadb-account-create" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.736962 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="bac5b437-403a-4a3e-a84e-163690732ab3" containerName="mariadb-account-create" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.736980 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="16444bcb-2d63-4893-9ced-b0a5375eb0de" containerName="mariadb-database-create" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.740601 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.742952 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-octavia-dockercfg-6fb4r" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.742950 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-ovndbs" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.744988 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-scripts" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.745306 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-config-data" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.748154 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-7b799f9ff6-tshqq"] Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.754009 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-config-data\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.754082 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/9d367b12-a287-44ee-b215-7bd62c33048a-octavia-run\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.754121 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-scripts\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.754144 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/9d367b12-a287-44ee-b215-7bd62c33048a-config-data-merged\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.754265 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-ovndb-tls-certs\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.754316 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-combined-ca-bundle\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.855015 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-ovndb-tls-certs\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.855099 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-combined-ca-bundle\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.855182 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-config-data\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.855242 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/9d367b12-a287-44ee-b215-7bd62c33048a-octavia-run\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.855278 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-scripts\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.855307 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: 
\"kubernetes.io/empty-dir/9d367b12-a287-44ee-b215-7bd62c33048a-config-data-merged\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.855834 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/9d367b12-a287-44ee-b215-7bd62c33048a-octavia-run\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.856002 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/9d367b12-a287-44ee-b215-7bd62c33048a-config-data-merged\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.864101 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-scripts\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.864230 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-ovndb-tls-certs\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.864447 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-config-data\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:18 crc kubenswrapper[4932]: I1125 10:30:18.866473 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-combined-ca-bundle\") pod \"octavia-api-7b799f9ff6-tshqq\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:19 crc kubenswrapper[4932]: I1125 10:30:19.058292 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:19 crc kubenswrapper[4932]: I1125 10:30:19.582372 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-7b799f9ff6-tshqq"] Nov 25 10:30:19 crc kubenswrapper[4932]: I1125 10:30:19.863860 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-7b799f9ff6-tshqq" event={"ID":"9d367b12-a287-44ee-b215-7bd62c33048a","Type":"ContainerStarted","Data":"afe69cb117847cee972d6c735ad60b2a63a3a0c8e76ebf84049517d7355fca3f"} Nov 25 10:30:29 crc kubenswrapper[4932]: I1125 10:30:29.963438 4932 generic.go:334] "Generic (PLEG): container finished" podID="9d367b12-a287-44ee-b215-7bd62c33048a" containerID="01b24f583218a0b89c54d8f7500514c61f7923d43d8d5a4aae14d783ac3db137" exitCode=0 Nov 25 10:30:29 crc kubenswrapper[4932]: I1125 10:30:29.963918 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-7b799f9ff6-tshqq" event={"ID":"9d367b12-a287-44ee-b215-7bd62c33048a","Type":"ContainerDied","Data":"01b24f583218a0b89c54d8f7500514c61f7923d43d8d5a4aae14d783ac3db137"} Nov 25 10:30:30 crc kubenswrapper[4932]: I1125 10:30:30.975014 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-7b799f9ff6-tshqq" event={"ID":"9d367b12-a287-44ee-b215-7bd62c33048a","Type":"ContainerStarted","Data":"1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e"} Nov 25 10:30:30 crc kubenswrapper[4932]: I1125 10:30:30.975509 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:30 crc kubenswrapper[4932]: I1125 10:30:30.975524 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-7b799f9ff6-tshqq" event={"ID":"9d367b12-a287-44ee-b215-7bd62c33048a","Type":"ContainerStarted","Data":"9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83"} Nov 25 10:30:30 crc kubenswrapper[4932]: I1125 10:30:30.975539 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:30 crc kubenswrapper[4932]: I1125 10:30:30.996493 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-7b799f9ff6-tshqq" podStartSLOduration=3.3620693 podStartE2EDuration="12.996468407s" podCreationTimestamp="2025-11-25 10:30:18 +0000 UTC" firstStartedPulling="2025-11-25 10:30:19.608695564 +0000 UTC m=+6079.734725127" lastFinishedPulling="2025-11-25 10:30:29.243094671 +0000 UTC m=+6089.369124234" observedRunningTime="2025-11-25 10:30:30.99273872 +0000 UTC m=+6091.118768283" watchObservedRunningTime="2025-11-25 10:30:30.996468407 +0000 UTC m=+6091.122497970" Nov 25 10:30:37 crc kubenswrapper[4932]: I1125 10:30:37.711140 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-8kh4g" Nov 25 10:30:37 crc kubenswrapper[4932]: I1125 10:30:37.738059 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-xbhld" Nov 25 10:30:37 crc kubenswrapper[4932]: I1125 10:30:37.744402 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-xbhld" Nov 25 10:30:37 crc kubenswrapper[4932]: I1125 10:30:37.870722 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-8kh4g-config-qh9nx"] Nov 25 10:30:37 crc kubenswrapper[4932]: I1125 10:30:37.873077 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:37 crc kubenswrapper[4932]: I1125 10:30:37.874863 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 10:30:37 crc kubenswrapper[4932]: I1125 10:30:37.890573 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8kh4g-config-qh9nx"] Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.046168 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-run-ovn\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.046590 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnwhm\" (UniqueName: \"kubernetes.io/projected/6dca87b1-aa9d-436e-833c-164f168d7da2-kube-api-access-vnwhm\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.046674 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6dca87b1-aa9d-436e-833c-164f168d7da2-scripts\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.046705 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-run\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.046726 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-log-ovn\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.046799 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6dca87b1-aa9d-436e-833c-164f168d7da2-additional-scripts\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.148238 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-run-ovn\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.148344 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnwhm\" (UniqueName: 
\"kubernetes.io/projected/6dca87b1-aa9d-436e-833c-164f168d7da2-kube-api-access-vnwhm\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.148418 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6dca87b1-aa9d-436e-833c-164f168d7da2-scripts\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.148449 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-run\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.148476 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-log-ovn\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.148509 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6dca87b1-aa9d-436e-833c-164f168d7da2-additional-scripts\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.148628 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-run-ovn\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.148628 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-run\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.148630 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-log-ovn\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.149541 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6dca87b1-aa9d-436e-833c-164f168d7da2-additional-scripts\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.150565 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/6dca87b1-aa9d-436e-833c-164f168d7da2-scripts\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.179832 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnwhm\" (UniqueName: \"kubernetes.io/projected/6dca87b1-aa9d-436e-833c-164f168d7da2-kube-api-access-vnwhm\") pod \"ovn-controller-8kh4g-config-qh9nx\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.194898 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:38 crc kubenswrapper[4932]: I1125 10:30:38.633900 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8kh4g-config-qh9nx"] Nov 25 10:30:39 crc kubenswrapper[4932]: I1125 10:30:39.046205 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8kh4g-config-qh9nx" event={"ID":"6dca87b1-aa9d-436e-833c-164f168d7da2","Type":"ContainerStarted","Data":"7430ee6826b13766c94c4977758e114cf9a05c0c37e8918bcaa5526205b1047d"} Nov 25 10:30:39 crc kubenswrapper[4932]: I1125 10:30:39.046514 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8kh4g-config-qh9nx" event={"ID":"6dca87b1-aa9d-436e-833c-164f168d7da2","Type":"ContainerStarted","Data":"319c70936a096dfa66da108dd814fc123f3a24f1ea843d2127f7532e2c31da4b"} Nov 25 10:30:39 crc kubenswrapper[4932]: I1125 10:30:39.070100 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-8kh4g-config-qh9nx" podStartSLOduration=2.070081277 podStartE2EDuration="2.070081277s" podCreationTimestamp="2025-11-25 10:30:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:30:39.063322833 +0000 UTC m=+6099.189352426" watchObservedRunningTime="2025-11-25 10:30:39.070081277 +0000 UTC m=+6099.196110840" Nov 25 10:30:40 crc kubenswrapper[4932]: I1125 10:30:40.058826 4932 generic.go:334] "Generic (PLEG): container finished" podID="6dca87b1-aa9d-436e-833c-164f168d7da2" containerID="7430ee6826b13766c94c4977758e114cf9a05c0c37e8918bcaa5526205b1047d" exitCode=0 Nov 25 10:30:40 crc kubenswrapper[4932]: I1125 10:30:40.058885 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8kh4g-config-qh9nx" event={"ID":"6dca87b1-aa9d-436e-833c-164f168d7da2","Type":"ContainerDied","Data":"7430ee6826b13766c94c4977758e114cf9a05c0c37e8918bcaa5526205b1047d"} Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.343625 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-rsyslog-2t9rs"] Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.345592 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.349807 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-config-data" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.350409 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-scripts" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.353044 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"octavia-hmport-map" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.355110 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-2t9rs"] Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.475402 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.510013 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8026b50-f832-4630-aacb-5d5bb5816dee-scripts\") pod \"octavia-rsyslog-2t9rs\" (UID: \"b8026b50-f832-4630-aacb-5d5bb5816dee\") " pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.510122 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/b8026b50-f832-4630-aacb-5d5bb5816dee-hm-ports\") pod \"octavia-rsyslog-2t9rs\" (UID: \"b8026b50-f832-4630-aacb-5d5bb5816dee\") " pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.510243 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8026b50-f832-4630-aacb-5d5bb5816dee-config-data\") pod \"octavia-rsyslog-2t9rs\" (UID: \"b8026b50-f832-4630-aacb-5d5bb5816dee\") " pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.510402 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/b8026b50-f832-4630-aacb-5d5bb5816dee-config-data-merged\") pod \"octavia-rsyslog-2t9rs\" (UID: \"b8026b50-f832-4630-aacb-5d5bb5816dee\") " pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.612280 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-run-ovn\") pod \"6dca87b1-aa9d-436e-833c-164f168d7da2\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.612324 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6dca87b1-aa9d-436e-833c-164f168d7da2-scripts\") pod \"6dca87b1-aa9d-436e-833c-164f168d7da2\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.612389 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6dca87b1-aa9d-436e-833c-164f168d7da2-additional-scripts\") pod \"6dca87b1-aa9d-436e-833c-164f168d7da2\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " Nov 25 10:30:41 crc 
kubenswrapper[4932]: I1125 10:30:41.612433 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-log-ovn\") pod \"6dca87b1-aa9d-436e-833c-164f168d7da2\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.612471 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "6dca87b1-aa9d-436e-833c-164f168d7da2" (UID: "6dca87b1-aa9d-436e-833c-164f168d7da2"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.612541 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vnwhm\" (UniqueName: \"kubernetes.io/projected/6dca87b1-aa9d-436e-833c-164f168d7da2-kube-api-access-vnwhm\") pod \"6dca87b1-aa9d-436e-833c-164f168d7da2\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.612590 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-run\") pod \"6dca87b1-aa9d-436e-833c-164f168d7da2\" (UID: \"6dca87b1-aa9d-436e-833c-164f168d7da2\") " Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.612589 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "6dca87b1-aa9d-436e-833c-164f168d7da2" (UID: "6dca87b1-aa9d-436e-833c-164f168d7da2"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.612796 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-run" (OuterVolumeSpecName: "var-run") pod "6dca87b1-aa9d-436e-833c-164f168d7da2" (UID: "6dca87b1-aa9d-436e-833c-164f168d7da2"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.612983 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8026b50-f832-4630-aacb-5d5bb5816dee-config-data\") pod \"octavia-rsyslog-2t9rs\" (UID: \"b8026b50-f832-4630-aacb-5d5bb5816dee\") " pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.613038 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/b8026b50-f832-4630-aacb-5d5bb5816dee-config-data-merged\") pod \"octavia-rsyslog-2t9rs\" (UID: \"b8026b50-f832-4630-aacb-5d5bb5816dee\") " pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.613152 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8026b50-f832-4630-aacb-5d5bb5816dee-scripts\") pod \"octavia-rsyslog-2t9rs\" (UID: \"b8026b50-f832-4630-aacb-5d5bb5816dee\") " pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.613230 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/b8026b50-f832-4630-aacb-5d5bb5816dee-hm-ports\") pod \"octavia-rsyslog-2t9rs\" (UID: \"b8026b50-f832-4630-aacb-5d5bb5816dee\") " pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.613327 4932 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.613344 4932 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.613354 4932 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6dca87b1-aa9d-436e-833c-164f168d7da2-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.613587 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/b8026b50-f832-4630-aacb-5d5bb5816dee-config-data-merged\") pod \"octavia-rsyslog-2t9rs\" (UID: \"b8026b50-f832-4630-aacb-5d5bb5816dee\") " pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.613899 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6dca87b1-aa9d-436e-833c-164f168d7da2-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "6dca87b1-aa9d-436e-833c-164f168d7da2" (UID: "6dca87b1-aa9d-436e-833c-164f168d7da2"). InnerVolumeSpecName "additional-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.614270 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/b8026b50-f832-4630-aacb-5d5bb5816dee-hm-ports\") pod \"octavia-rsyslog-2t9rs\" (UID: \"b8026b50-f832-4630-aacb-5d5bb5816dee\") " pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.614368 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6dca87b1-aa9d-436e-833c-164f168d7da2-scripts" (OuterVolumeSpecName: "scripts") pod "6dca87b1-aa9d-436e-833c-164f168d7da2" (UID: "6dca87b1-aa9d-436e-833c-164f168d7da2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.618684 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6dca87b1-aa9d-436e-833c-164f168d7da2-kube-api-access-vnwhm" (OuterVolumeSpecName: "kube-api-access-vnwhm") pod "6dca87b1-aa9d-436e-833c-164f168d7da2" (UID: "6dca87b1-aa9d-436e-833c-164f168d7da2"). InnerVolumeSpecName "kube-api-access-vnwhm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.619172 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b8026b50-f832-4630-aacb-5d5bb5816dee-scripts\") pod \"octavia-rsyslog-2t9rs\" (UID: \"b8026b50-f832-4630-aacb-5d5bb5816dee\") " pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.626686 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b8026b50-f832-4630-aacb-5d5bb5816dee-config-data\") pod \"octavia-rsyslog-2t9rs\" (UID: \"b8026b50-f832-4630-aacb-5d5bb5816dee\") " pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.673437 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.714741 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vnwhm\" (UniqueName: \"kubernetes.io/projected/6dca87b1-aa9d-436e-833c-164f168d7da2-kube-api-access-vnwhm\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.714820 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6dca87b1-aa9d-436e-833c-164f168d7da2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:41 crc kubenswrapper[4932]: I1125 10:30:41.714832 4932 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6dca87b1-aa9d-436e-833c-164f168d7da2-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.083237 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8kh4g-config-qh9nx" event={"ID":"6dca87b1-aa9d-436e-833c-164f168d7da2","Type":"ContainerDied","Data":"319c70936a096dfa66da108dd814fc123f3a24f1ea843d2127f7532e2c31da4b"} Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.083276 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="319c70936a096dfa66da108dd814fc123f3a24f1ea843d2127f7532e2c31da4b" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.083321 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8kh4g-config-qh9nx" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.165836 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-8kh4g-config-qh9nx"] Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.194098 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-8kh4g-config-qh9nx"] Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.209993 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-2t9rs"] Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.260734 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-8kh4g-config-t87hq"] Nov 25 10:30:42 crc kubenswrapper[4932]: E1125 10:30:42.261414 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dca87b1-aa9d-436e-833c-164f168d7da2" containerName="ovn-config" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.261436 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dca87b1-aa9d-436e-833c-164f168d7da2" containerName="ovn-config" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.261702 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="6dca87b1-aa9d-436e-833c-164f168d7da2" containerName="ovn-config" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.262545 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.265143 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.270147 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8kh4g-config-t87hq"] Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.431490 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-5955f5554b-7kzmd"] Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.433293 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-run-ovn\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.433338 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfjn8\" (UniqueName: \"kubernetes.io/projected/1fff062b-dae1-4f9b-8f26-1e7511629017-kube-api-access-jfjn8\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.433383 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1fff062b-dae1-4f9b-8f26-1e7511629017-additional-scripts\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.433419 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-run\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.433482 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1fff062b-dae1-4f9b-8f26-1e7511629017-scripts\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.433515 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-log-ovn\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.436445 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-5955f5554b-7kzmd" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.441540 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.459494 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-7kzmd"] Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.535523 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/95cbafac-1b86-480a-b491-7e19df31f063-httpd-config\") pod \"octavia-image-upload-5955f5554b-7kzmd\" (UID: \"95cbafac-1b86-480a-b491-7e19df31f063\") " pod="openstack/octavia-image-upload-5955f5554b-7kzmd" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.535618 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1fff062b-dae1-4f9b-8f26-1e7511629017-additional-scripts\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.535850 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-run\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.536148 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1fff062b-dae1-4f9b-8f26-1e7511629017-scripts\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.536274 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-log-ovn\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.536319 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-run\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.536428 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-log-ovn\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.536844 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-run-ovn\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " 
pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.536907 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-run-ovn\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.536903 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfjn8\" (UniqueName: \"kubernetes.io/projected/1fff062b-dae1-4f9b-8f26-1e7511629017-kube-api-access-jfjn8\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.536987 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1fff062b-dae1-4f9b-8f26-1e7511629017-additional-scripts\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.537006 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/95cbafac-1b86-480a-b491-7e19df31f063-amphora-image\") pod \"octavia-image-upload-5955f5554b-7kzmd\" (UID: \"95cbafac-1b86-480a-b491-7e19df31f063\") " pod="openstack/octavia-image-upload-5955f5554b-7kzmd" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.539323 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1fff062b-dae1-4f9b-8f26-1e7511629017-scripts\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.566168 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfjn8\" (UniqueName: \"kubernetes.io/projected/1fff062b-dae1-4f9b-8f26-1e7511629017-kube-api-access-jfjn8\") pod \"ovn-controller-8kh4g-config-t87hq\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.589169 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.626534 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6dca87b1-aa9d-436e-833c-164f168d7da2" path="/var/lib/kubelet/pods/6dca87b1-aa9d-436e-833c-164f168d7da2/volumes" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.638458 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/95cbafac-1b86-480a-b491-7e19df31f063-amphora-image\") pod \"octavia-image-upload-5955f5554b-7kzmd\" (UID: \"95cbafac-1b86-480a-b491-7e19df31f063\") " pod="openstack/octavia-image-upload-5955f5554b-7kzmd" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.638521 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/95cbafac-1b86-480a-b491-7e19df31f063-httpd-config\") pod \"octavia-image-upload-5955f5554b-7kzmd\" (UID: \"95cbafac-1b86-480a-b491-7e19df31f063\") " pod="openstack/octavia-image-upload-5955f5554b-7kzmd" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.651912 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/95cbafac-1b86-480a-b491-7e19df31f063-amphora-image\") pod \"octavia-image-upload-5955f5554b-7kzmd\" (UID: \"95cbafac-1b86-480a-b491-7e19df31f063\") " pod="openstack/octavia-image-upload-5955f5554b-7kzmd" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.652502 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/95cbafac-1b86-480a-b491-7e19df31f063-httpd-config\") pod \"octavia-image-upload-5955f5554b-7kzmd\" (UID: \"95cbafac-1b86-480a-b491-7e19df31f063\") " pod="openstack/octavia-image-upload-5955f5554b-7kzmd" Nov 25 10:30:42 crc kubenswrapper[4932]: I1125 10:30:42.756683 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-5955f5554b-7kzmd" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.071462 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-8kh4g-config-t87hq"] Nov 25 10:30:43 crc kubenswrapper[4932]: W1125 10:30:43.074400 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1fff062b_dae1_4f9b_8f26_1e7511629017.slice/crio-71e0d12e2fbb0bbf5ec9307922f00f1648214a5aad0d5eaadda1b8621970ac6d WatchSource:0}: Error finding container 71e0d12e2fbb0bbf5ec9307922f00f1648214a5aad0d5eaadda1b8621970ac6d: Status 404 returned error can't find the container with id 71e0d12e2fbb0bbf5ec9307922f00f1648214a5aad0d5eaadda1b8621970ac6d Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.094869 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8kh4g-config-t87hq" event={"ID":"1fff062b-dae1-4f9b-8f26-1e7511629017","Type":"ContainerStarted","Data":"71e0d12e2fbb0bbf5ec9307922f00f1648214a5aad0d5eaadda1b8621970ac6d"} Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.096212 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-2t9rs" event={"ID":"b8026b50-f832-4630-aacb-5d5bb5816dee","Type":"ContainerStarted","Data":"d916241b5b878b5d10b2ce76e2552a79397500aba055652929724cf0d04ce717"} Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.246661 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-7kzmd"] Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.534027 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-sync-dfpz5"] Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.535671 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.537685 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-scripts" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.554675 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-dfpz5"] Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.661578 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-scripts\") pod \"octavia-db-sync-dfpz5\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.661668 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-combined-ca-bundle\") pod \"octavia-db-sync-dfpz5\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.661754 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-config-data\") pod \"octavia-db-sync-dfpz5\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.661802 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/4826971d-c5a6-4c1a-be73-58a94eb21dd9-config-data-merged\") pod \"octavia-db-sync-dfpz5\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.764094 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-combined-ca-bundle\") pod \"octavia-db-sync-dfpz5\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.764394 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-config-data\") pod \"octavia-db-sync-dfpz5\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.764511 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/4826971d-c5a6-4c1a-be73-58a94eb21dd9-config-data-merged\") pod \"octavia-db-sync-dfpz5\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.764629 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-scripts\") pod \"octavia-db-sync-dfpz5\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.765135 4932 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/4826971d-c5a6-4c1a-be73-58a94eb21dd9-config-data-merged\") pod \"octavia-db-sync-dfpz5\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.772049 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-combined-ca-bundle\") pod \"octavia-db-sync-dfpz5\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.773336 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-config-data\") pod \"octavia-db-sync-dfpz5\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.780105 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-scripts\") pod \"octavia-db-sync-dfpz5\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:43 crc kubenswrapper[4932]: I1125 10:30:43.862010 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:44 crc kubenswrapper[4932]: I1125 10:30:44.117170 4932 generic.go:334] "Generic (PLEG): container finished" podID="1fff062b-dae1-4f9b-8f26-1e7511629017" containerID="c1271bbd203bf128ebe6560aba8b9d8537f76fd3a43378f19caae83de8afc6cb" exitCode=0 Nov 25 10:30:44 crc kubenswrapper[4932]: I1125 10:30:44.117265 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8kh4g-config-t87hq" event={"ID":"1fff062b-dae1-4f9b-8f26-1e7511629017","Type":"ContainerDied","Data":"c1271bbd203bf128ebe6560aba8b9d8537f76fd3a43378f19caae83de8afc6cb"} Nov 25 10:30:44 crc kubenswrapper[4932]: I1125 10:30:44.120781 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-7kzmd" event={"ID":"95cbafac-1b86-480a-b491-7e19df31f063","Type":"ContainerStarted","Data":"17fb94a5ea9fb3ce7dc658e490b9eda37a95ee1299129caee6efcc15ad4b4942"} Nov 25 10:30:44 crc kubenswrapper[4932]: I1125 10:30:44.538167 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-dfpz5"] Nov 25 10:30:44 crc kubenswrapper[4932]: W1125 10:30:44.665429 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4826971d_c5a6_4c1a_be73_58a94eb21dd9.slice/crio-92ae6ed98bf25eb7ae58cebbd5c1ab3b20f9bd847de5850a0f851547e2bf9003 WatchSource:0}: Error finding container 92ae6ed98bf25eb7ae58cebbd5c1ab3b20f9bd847de5850a0f851547e2bf9003: Status 404 returned error can't find the container with id 92ae6ed98bf25eb7ae58cebbd5c1ab3b20f9bd847de5850a0f851547e2bf9003 Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.133168 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-dfpz5" event={"ID":"4826971d-c5a6-4c1a-be73-58a94eb21dd9","Type":"ContainerStarted","Data":"92ae6ed98bf25eb7ae58cebbd5c1ab3b20f9bd847de5850a0f851547e2bf9003"} Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.134955 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/octavia-rsyslog-2t9rs" event={"ID":"b8026b50-f832-4630-aacb-5d5bb5816dee","Type":"ContainerStarted","Data":"1d8bb3b79adfef93b4d37b5679936fbb13a52533c667a2dffdf9141ff3058175"} Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.582428 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.705784 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-log-ovn\") pod \"1fff062b-dae1-4f9b-8f26-1e7511629017\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.705879 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1fff062b-dae1-4f9b-8f26-1e7511629017-scripts\") pod \"1fff062b-dae1-4f9b-8f26-1e7511629017\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.705896 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "1fff062b-dae1-4f9b-8f26-1e7511629017" (UID: "1fff062b-dae1-4f9b-8f26-1e7511629017"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.706994 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-run-ovn\") pod \"1fff062b-dae1-4f9b-8f26-1e7511629017\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.707048 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "1fff062b-dae1-4f9b-8f26-1e7511629017" (UID: "1fff062b-dae1-4f9b-8f26-1e7511629017"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.707256 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-run\") pod \"1fff062b-dae1-4f9b-8f26-1e7511629017\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.707291 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-run" (OuterVolumeSpecName: "var-run") pod "1fff062b-dae1-4f9b-8f26-1e7511629017" (UID: "1fff062b-dae1-4f9b-8f26-1e7511629017"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.707503 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1fff062b-dae1-4f9b-8f26-1e7511629017-scripts" (OuterVolumeSpecName: "scripts") pod "1fff062b-dae1-4f9b-8f26-1e7511629017" (UID: "1fff062b-dae1-4f9b-8f26-1e7511629017"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.707499 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1fff062b-dae1-4f9b-8f26-1e7511629017-additional-scripts\") pod \"1fff062b-dae1-4f9b-8f26-1e7511629017\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.707601 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfjn8\" (UniqueName: \"kubernetes.io/projected/1fff062b-dae1-4f9b-8f26-1e7511629017-kube-api-access-jfjn8\") pod \"1fff062b-dae1-4f9b-8f26-1e7511629017\" (UID: \"1fff062b-dae1-4f9b-8f26-1e7511629017\") " Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.708276 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1fff062b-dae1-4f9b-8f26-1e7511629017-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "1fff062b-dae1-4f9b-8f26-1e7511629017" (UID: "1fff062b-dae1-4f9b-8f26-1e7511629017"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.708764 4932 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1fff062b-dae1-4f9b-8f26-1e7511629017-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.708790 4932 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.708802 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1fff062b-dae1-4f9b-8f26-1e7511629017-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.708814 4932 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.708824 4932 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1fff062b-dae1-4f9b-8f26-1e7511629017-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.717957 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fff062b-dae1-4f9b-8f26-1e7511629017-kube-api-access-jfjn8" (OuterVolumeSpecName: "kube-api-access-jfjn8") pod "1fff062b-dae1-4f9b-8f26-1e7511629017" (UID: "1fff062b-dae1-4f9b-8f26-1e7511629017"). InnerVolumeSpecName "kube-api-access-jfjn8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:30:45 crc kubenswrapper[4932]: I1125 10:30:45.810513 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfjn8\" (UniqueName: \"kubernetes.io/projected/1fff062b-dae1-4f9b-8f26-1e7511629017-kube-api-access-jfjn8\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:46 crc kubenswrapper[4932]: I1125 10:30:46.153364 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-8kh4g-config-t87hq" Nov 25 10:30:46 crc kubenswrapper[4932]: I1125 10:30:46.153363 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-8kh4g-config-t87hq" event={"ID":"1fff062b-dae1-4f9b-8f26-1e7511629017","Type":"ContainerDied","Data":"71e0d12e2fbb0bbf5ec9307922f00f1648214a5aad0d5eaadda1b8621970ac6d"} Nov 25 10:30:46 crc kubenswrapper[4932]: I1125 10:30:46.153502 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="71e0d12e2fbb0bbf5ec9307922f00f1648214a5aad0d5eaadda1b8621970ac6d" Nov 25 10:30:46 crc kubenswrapper[4932]: I1125 10:30:46.158575 4932 generic.go:334] "Generic (PLEG): container finished" podID="4826971d-c5a6-4c1a-be73-58a94eb21dd9" containerID="3a7caf3900268974e4a495aeed77b7e4e31df39ff7b96aa8d049e45540f9720a" exitCode=0 Nov 25 10:30:46 crc kubenswrapper[4932]: I1125 10:30:46.158709 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-dfpz5" event={"ID":"4826971d-c5a6-4c1a-be73-58a94eb21dd9","Type":"ContainerDied","Data":"3a7caf3900268974e4a495aeed77b7e4e31df39ff7b96aa8d049e45540f9720a"} Nov 25 10:30:46 crc kubenswrapper[4932]: I1125 10:30:46.661789 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-8kh4g-config-t87hq"] Nov 25 10:30:46 crc kubenswrapper[4932]: I1125 10:30:46.671606 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-8kh4g-config-t87hq"] Nov 25 10:30:47 crc kubenswrapper[4932]: I1125 10:30:47.178750 4932 generic.go:334] "Generic (PLEG): container finished" podID="b8026b50-f832-4630-aacb-5d5bb5816dee" containerID="1d8bb3b79adfef93b4d37b5679936fbb13a52533c667a2dffdf9141ff3058175" exitCode=0 Nov 25 10:30:47 crc kubenswrapper[4932]: I1125 10:30:47.178963 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-2t9rs" event={"ID":"b8026b50-f832-4630-aacb-5d5bb5816dee","Type":"ContainerDied","Data":"1d8bb3b79adfef93b4d37b5679936fbb13a52533c667a2dffdf9141ff3058175"} Nov 25 10:30:47 crc kubenswrapper[4932]: I1125 10:30:47.186661 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-dfpz5" event={"ID":"4826971d-c5a6-4c1a-be73-58a94eb21dd9","Type":"ContainerStarted","Data":"3ecf39e075708b3d8717dc54dde1f9b39c6b6404c4d3ca37fc7d6dce77e7707b"} Nov 25 10:30:47 crc kubenswrapper[4932]: I1125 10:30:47.231113 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-db-sync-dfpz5" podStartSLOduration=4.231097917 podStartE2EDuration="4.231097917s" podCreationTimestamp="2025-11-25 10:30:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:30:47.222886882 +0000 UTC m=+6107.348916445" watchObservedRunningTime="2025-11-25 10:30:47.231097917 +0000 UTC m=+6107.357127480" Nov 25 10:30:48 crc kubenswrapper[4932]: I1125 10:30:48.619211 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fff062b-dae1-4f9b-8f26-1e7511629017" path="/var/lib/kubelet/pods/1fff062b-dae1-4f9b-8f26-1e7511629017/volumes" Nov 25 10:30:50 crc kubenswrapper[4932]: I1125 10:30:50.220700 4932 generic.go:334] "Generic (PLEG): container finished" podID="4826971d-c5a6-4c1a-be73-58a94eb21dd9" containerID="3ecf39e075708b3d8717dc54dde1f9b39c6b6404c4d3ca37fc7d6dce77e7707b" exitCode=0 Nov 25 10:30:50 crc kubenswrapper[4932]: I1125 10:30:50.221201 4932 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/octavia-db-sync-dfpz5" event={"ID":"4826971d-c5a6-4c1a-be73-58a94eb21dd9","Type":"ContainerDied","Data":"3ecf39e075708b3d8717dc54dde1f9b39c6b6404c4d3ca37fc7d6dce77e7707b"} Nov 25 10:30:53 crc kubenswrapper[4932]: I1125 10:30:53.419137 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:53 crc kubenswrapper[4932]: I1125 10:30:53.488769 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.418236 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.541103 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-combined-ca-bundle\") pod \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.541181 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/4826971d-c5a6-4c1a-be73-58a94eb21dd9-config-data-merged\") pod \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.541276 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-config-data\") pod \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.541350 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-scripts\") pod \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\" (UID: \"4826971d-c5a6-4c1a-be73-58a94eb21dd9\") " Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.546715 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-config-data" (OuterVolumeSpecName: "config-data") pod "4826971d-c5a6-4c1a-be73-58a94eb21dd9" (UID: "4826971d-c5a6-4c1a-be73-58a94eb21dd9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.547833 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-scripts" (OuterVolumeSpecName: "scripts") pod "4826971d-c5a6-4c1a-be73-58a94eb21dd9" (UID: "4826971d-c5a6-4c1a-be73-58a94eb21dd9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.572562 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4826971d-c5a6-4c1a-be73-58a94eb21dd9" (UID: "4826971d-c5a6-4c1a-be73-58a94eb21dd9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.572491 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4826971d-c5a6-4c1a-be73-58a94eb21dd9-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "4826971d-c5a6-4c1a-be73-58a94eb21dd9" (UID: "4826971d-c5a6-4c1a-be73-58a94eb21dd9"). InnerVolumeSpecName "config-data-merged". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.643070 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.643100 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.643116 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/4826971d-c5a6-4c1a-be73-58a94eb21dd9-config-data-merged\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:54 crc kubenswrapper[4932]: I1125 10:30:54.643128 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4826971d-c5a6-4c1a-be73-58a94eb21dd9-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:30:55 crc kubenswrapper[4932]: I1125 10:30:55.303810 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-dfpz5" event={"ID":"4826971d-c5a6-4c1a-be73-58a94eb21dd9","Type":"ContainerDied","Data":"92ae6ed98bf25eb7ae58cebbd5c1ab3b20f9bd847de5850a0f851547e2bf9003"} Nov 25 10:30:55 crc kubenswrapper[4932]: I1125 10:30:55.304401 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92ae6ed98bf25eb7ae58cebbd5c1ab3b20f9bd847de5850a0f851547e2bf9003" Nov 25 10:30:55 crc kubenswrapper[4932]: I1125 10:30:55.303830 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-dfpz5" Nov 25 10:30:55 crc kubenswrapper[4932]: I1125 10:30:55.305617 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-7kzmd" event={"ID":"95cbafac-1b86-480a-b491-7e19df31f063","Type":"ContainerStarted","Data":"6d4f8b5f038068aa082468d6248f81fda8755d5dd4914524f98e4c3a8e6c7250"} Nov 25 10:30:55 crc kubenswrapper[4932]: I1125 10:30:55.308358 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-2t9rs" event={"ID":"b8026b50-f832-4630-aacb-5d5bb5816dee","Type":"ContainerStarted","Data":"25a4863d8d94e09dcb5a287da3abe07b6bd5dee977d7181d332daa53b733766a"} Nov 25 10:30:55 crc kubenswrapper[4932]: I1125 10:30:55.308714 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:30:55 crc kubenswrapper[4932]: I1125 10:30:55.359471 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-rsyslog-2t9rs" podStartSLOduration=1.645811214 podStartE2EDuration="14.359445062s" podCreationTimestamp="2025-11-25 10:30:41 +0000 UTC" firstStartedPulling="2025-11-25 10:30:42.222340628 +0000 UTC m=+6102.348370191" lastFinishedPulling="2025-11-25 10:30:54.935974476 +0000 UTC m=+6115.062004039" observedRunningTime="2025-11-25 10:30:55.348027495 +0000 UTC m=+6115.474057058" watchObservedRunningTime="2025-11-25 10:30:55.359445062 +0000 UTC m=+6115.485474635" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.648241 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-58b89b8788-wj674"] Nov 25 10:30:56 crc kubenswrapper[4932]: E1125 10:30:56.653096 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4826971d-c5a6-4c1a-be73-58a94eb21dd9" containerName="init" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.653124 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4826971d-c5a6-4c1a-be73-58a94eb21dd9" containerName="init" Nov 25 10:30:56 crc kubenswrapper[4932]: E1125 10:30:56.653151 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fff062b-dae1-4f9b-8f26-1e7511629017" containerName="ovn-config" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.653159 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fff062b-dae1-4f9b-8f26-1e7511629017" containerName="ovn-config" Nov 25 10:30:56 crc kubenswrapper[4932]: E1125 10:30:56.653178 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4826971d-c5a6-4c1a-be73-58a94eb21dd9" containerName="octavia-db-sync" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.653203 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4826971d-c5a6-4c1a-be73-58a94eb21dd9" containerName="octavia-db-sync" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.653415 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4826971d-c5a6-4c1a-be73-58a94eb21dd9" containerName="octavia-db-sync" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.653426 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fff062b-dae1-4f9b-8f26-1e7511629017" containerName="ovn-config" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.654964 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.659015 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-public-svc" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.659440 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-internal-svc" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.663426 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-58b89b8788-wj674"] Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.695632 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-ovndb-tls-certs\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.695678 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-config-data\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.695709 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-octavia-run\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.695740 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-public-tls-certs\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.695767 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-config-data-merged\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.695785 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-scripts\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.695831 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-combined-ca-bundle\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.695848 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-internal-tls-certs\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.796556 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-public-tls-certs\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.796602 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-config-data-merged\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.796622 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-scripts\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.796663 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-combined-ca-bundle\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.796682 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-internal-tls-certs\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.796761 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-ovndb-tls-certs\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.796788 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-config-data\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.796816 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-octavia-run\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.797376 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: 
\"kubernetes.io/empty-dir/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-config-data-merged\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.797409 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-octavia-run\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.802434 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-scripts\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.802988 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-public-tls-certs\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.803001 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-internal-tls-certs\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.803525 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-ovndb-tls-certs\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.804045 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-combined-ca-bundle\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.809035 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/989abece-ef47-44ab-a85b-ea1f5fd4cf2a-config-data\") pod \"octavia-api-58b89b8788-wj674\" (UID: \"989abece-ef47-44ab-a85b-ea1f5fd4cf2a\") " pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:56 crc kubenswrapper[4932]: I1125 10:30:56.986421 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:30:57 crc kubenswrapper[4932]: I1125 10:30:57.454039 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-58b89b8788-wj674"] Nov 25 10:30:58 crc kubenswrapper[4932]: I1125 10:30:58.342453 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-58b89b8788-wj674" event={"ID":"989abece-ef47-44ab-a85b-ea1f5fd4cf2a","Type":"ContainerStarted","Data":"9f2a14f804b37cc80b785525e3c6a69daa5d7015332a300e12295bf9f805b7bb"} Nov 25 10:30:58 crc kubenswrapper[4932]: I1125 10:30:58.342892 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-58b89b8788-wj674" event={"ID":"989abece-ef47-44ab-a85b-ea1f5fd4cf2a","Type":"ContainerStarted","Data":"6cd80950eea88dd0b6bd592c420d04d6948a7bf65bf301602cbcf4ba0361bdd2"} Nov 25 10:30:59 crc kubenswrapper[4932]: I1125 10:30:59.354095 4932 generic.go:334] "Generic (PLEG): container finished" podID="95cbafac-1b86-480a-b491-7e19df31f063" containerID="6d4f8b5f038068aa082468d6248f81fda8755d5dd4914524f98e4c3a8e6c7250" exitCode=0 Nov 25 10:30:59 crc kubenswrapper[4932]: I1125 10:30:59.354385 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-7kzmd" event={"ID":"95cbafac-1b86-480a-b491-7e19df31f063","Type":"ContainerDied","Data":"6d4f8b5f038068aa082468d6248f81fda8755d5dd4914524f98e4c3a8e6c7250"} Nov 25 10:31:00 crc kubenswrapper[4932]: I1125 10:31:00.366964 4932 generic.go:334] "Generic (PLEG): container finished" podID="989abece-ef47-44ab-a85b-ea1f5fd4cf2a" containerID="9f2a14f804b37cc80b785525e3c6a69daa5d7015332a300e12295bf9f805b7bb" exitCode=0 Nov 25 10:31:00 crc kubenswrapper[4932]: I1125 10:31:00.367067 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-58b89b8788-wj674" event={"ID":"989abece-ef47-44ab-a85b-ea1f5fd4cf2a","Type":"ContainerDied","Data":"9f2a14f804b37cc80b785525e3c6a69daa5d7015332a300e12295bf9f805b7bb"} Nov 25 10:31:00 crc kubenswrapper[4932]: I1125 10:31:00.389649 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-7kzmd" event={"ID":"95cbafac-1b86-480a-b491-7e19df31f063","Type":"ContainerStarted","Data":"030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8"} Nov 25 10:31:00 crc kubenswrapper[4932]: I1125 10:31:00.456736 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-5955f5554b-7kzmd" podStartSLOduration=6.649446089 podStartE2EDuration="18.456713473s" podCreationTimestamp="2025-11-25 10:30:42 +0000 UTC" firstStartedPulling="2025-11-25 10:30:43.26847131 +0000 UTC m=+6103.394500873" lastFinishedPulling="2025-11-25 10:30:55.075738694 +0000 UTC m=+6115.201768257" observedRunningTime="2025-11-25 10:31:00.445282166 +0000 UTC m=+6120.571311749" watchObservedRunningTime="2025-11-25 10:31:00.456713473 +0000 UTC m=+6120.582743036" Nov 25 10:31:01 crc kubenswrapper[4932]: I1125 10:31:01.401653 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-58b89b8788-wj674" event={"ID":"989abece-ef47-44ab-a85b-ea1f5fd4cf2a","Type":"ContainerStarted","Data":"c676c69a28053e22fc325d450a71ae6699e582b2f8115519f629743c99aac917"} Nov 25 10:31:01 crc kubenswrapper[4932]: I1125 10:31:01.402014 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-58b89b8788-wj674" 
event={"ID":"989abece-ef47-44ab-a85b-ea1f5fd4cf2a","Type":"ContainerStarted","Data":"b838885590a81c3d39adb80e4206ec23366f877cce9b760da5b5c3425f731466"} Nov 25 10:31:01 crc kubenswrapper[4932]: I1125 10:31:01.402276 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:31:01 crc kubenswrapper[4932]: I1125 10:31:01.402313 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:31:01 crc kubenswrapper[4932]: I1125 10:31:01.436063 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-58b89b8788-wj674" podStartSLOduration=5.436042284 podStartE2EDuration="5.436042284s" podCreationTimestamp="2025-11-25 10:30:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:31:01.426584203 +0000 UTC m=+6121.552613786" watchObservedRunningTime="2025-11-25 10:31:01.436042284 +0000 UTC m=+6121.562071847" Nov 25 10:31:10 crc kubenswrapper[4932]: I1125 10:31:10.875537 4932 scope.go:117] "RemoveContainer" containerID="fc008c454e853da5bee0263aa2bbf89684bb8e828a3d5ef25066d9a778ff45f0" Nov 25 10:31:10 crc kubenswrapper[4932]: I1125 10:31:10.906260 4932 scope.go:117] "RemoveContainer" containerID="945e876787244a7a68b8d992a3cda16055eed25ae9f958fc05d706c1da8b0e6d" Nov 25 10:31:11 crc kubenswrapper[4932]: I1125 10:31:11.722386 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-rsyslog-2t9rs" Nov 25 10:31:16 crc kubenswrapper[4932]: I1125 10:31:16.265847 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:31:16 crc kubenswrapper[4932]: I1125 10:31:16.396297 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-58b89b8788-wj674" Nov 25 10:31:16 crc kubenswrapper[4932]: I1125 10:31:16.458852 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-api-7b799f9ff6-tshqq"] Nov 25 10:31:16 crc kubenswrapper[4932]: I1125 10:31:16.459140 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-api-7b799f9ff6-tshqq" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" containerName="octavia-api" containerID="cri-o://9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83" gracePeriod=30 Nov 25 10:31:16 crc kubenswrapper[4932]: I1125 10:31:16.459636 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-api-7b799f9ff6-tshqq" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" containerName="octavia-api-provider-agent" containerID="cri-o://1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e" gracePeriod=30 Nov 25 10:31:17 crc kubenswrapper[4932]: I1125 10:31:17.549250 4932 generic.go:334] "Generic (PLEG): container finished" podID="9d367b12-a287-44ee-b215-7bd62c33048a" containerID="1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e" exitCode=0 Nov 25 10:31:17 crc kubenswrapper[4932]: I1125 10:31:17.549307 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-7b799f9ff6-tshqq" event={"ID":"9d367b12-a287-44ee-b215-7bd62c33048a","Type":"ContainerDied","Data":"1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e"} Nov 25 10:31:19 crc kubenswrapper[4932]: I1125 10:31:19.610875 4932 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openstack/octavia-api-7b799f9ff6-tshqq" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" containerName="octavia-api" probeResult="failure" output="Get \"http://10.217.1.112:9876/healthcheck\": read tcp 10.217.0.2:36618->10.217.1.112:9876: read: connection reset by peer" Nov 25 10:31:19 crc kubenswrapper[4932]: I1125 10:31:19.610891 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/octavia-api-7b799f9ff6-tshqq" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" containerName="octavia-api-provider-agent" probeResult="failure" output="Get \"http://10.217.1.112:9876/healthcheck\": read tcp 10.217.0.2:36628->10.217.1.112:9876: read: connection reset by peer" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.096498 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.182778 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/9d367b12-a287-44ee-b215-7bd62c33048a-octavia-run\") pod \"9d367b12-a287-44ee-b215-7bd62c33048a\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.182898 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-ovndb-tls-certs\") pod \"9d367b12-a287-44ee-b215-7bd62c33048a\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.183049 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-scripts\") pod \"9d367b12-a287-44ee-b215-7bd62c33048a\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.183134 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-config-data\") pod \"9d367b12-a287-44ee-b215-7bd62c33048a\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.183218 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/9d367b12-a287-44ee-b215-7bd62c33048a-config-data-merged\") pod \"9d367b12-a287-44ee-b215-7bd62c33048a\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.183305 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-combined-ca-bundle\") pod \"9d367b12-a287-44ee-b215-7bd62c33048a\" (UID: \"9d367b12-a287-44ee-b215-7bd62c33048a\") " Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.183345 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d367b12-a287-44ee-b215-7bd62c33048a-octavia-run" (OuterVolumeSpecName: "octavia-run") pod "9d367b12-a287-44ee-b215-7bd62c33048a" (UID: "9d367b12-a287-44ee-b215-7bd62c33048a"). InnerVolumeSpecName "octavia-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.183837 4932 reconciler_common.go:293] "Volume detached for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/9d367b12-a287-44ee-b215-7bd62c33048a-octavia-run\") on node \"crc\" DevicePath \"\"" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.189146 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-scripts" (OuterVolumeSpecName: "scripts") pod "9d367b12-a287-44ee-b215-7bd62c33048a" (UID: "9d367b12-a287-44ee-b215-7bd62c33048a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.189213 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-config-data" (OuterVolumeSpecName: "config-data") pod "9d367b12-a287-44ee-b215-7bd62c33048a" (UID: "9d367b12-a287-44ee-b215-7bd62c33048a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.239257 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d367b12-a287-44ee-b215-7bd62c33048a-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "9d367b12-a287-44ee-b215-7bd62c33048a" (UID: "9d367b12-a287-44ee-b215-7bd62c33048a"). InnerVolumeSpecName "config-data-merged". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.265395 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d367b12-a287-44ee-b215-7bd62c33048a" (UID: "9d367b12-a287-44ee-b215-7bd62c33048a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.285635 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.285680 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.285694 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/9d367b12-a287-44ee-b215-7bd62c33048a-config-data-merged\") on node \"crc\" DevicePath \"\"" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.285712 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.331397 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "9d367b12-a287-44ee-b215-7bd62c33048a" (UID: "9d367b12-a287-44ee-b215-7bd62c33048a"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.387208 4932 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d367b12-a287-44ee-b215-7bd62c33048a-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.579607 4932 generic.go:334] "Generic (PLEG): container finished" podID="9d367b12-a287-44ee-b215-7bd62c33048a" containerID="9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83" exitCode=0 Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.579657 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-7b799f9ff6-tshqq" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.579663 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-7b799f9ff6-tshqq" event={"ID":"9d367b12-a287-44ee-b215-7bd62c33048a","Type":"ContainerDied","Data":"9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83"} Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.579686 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-7b799f9ff6-tshqq" event={"ID":"9d367b12-a287-44ee-b215-7bd62c33048a","Type":"ContainerDied","Data":"afe69cb117847cee972d6c735ad60b2a63a3a0c8e76ebf84049517d7355fca3f"} Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.579712 4932 scope.go:117] "RemoveContainer" containerID="1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.611915 4932 scope.go:117] "RemoveContainer" containerID="9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.626036 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-api-7b799f9ff6-tshqq"] Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.635300 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-api-7b799f9ff6-tshqq"] Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.642361 4932 scope.go:117] "RemoveContainer" containerID="01b24f583218a0b89c54d8f7500514c61f7923d43d8d5a4aae14d783ac3db137" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.678143 4932 scope.go:117] "RemoveContainer" containerID="1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e" Nov 25 10:31:20 crc kubenswrapper[4932]: E1125 10:31:20.679531 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e\": container with ID starting with 1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e not found: ID does not exist" containerID="1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.679568 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e"} err="failed to get container status \"1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e\": rpc error: code = NotFound desc = could not find container \"1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e\": container with ID starting with 1ab0016af665148630db3a754fd14f875265fcac690fdf82920673c00c1a271e not found: ID does not exist" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.679598 
4932 scope.go:117] "RemoveContainer" containerID="9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83" Nov 25 10:31:20 crc kubenswrapper[4932]: E1125 10:31:20.679905 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83\": container with ID starting with 9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83 not found: ID does not exist" containerID="9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.679936 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83"} err="failed to get container status \"9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83\": rpc error: code = NotFound desc = could not find container \"9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83\": container with ID starting with 9f008b40de1bb482d2e3dd71e435835529a23daeab6c72eb8c5ea121cae33f83 not found: ID does not exist" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.679953 4932 scope.go:117] "RemoveContainer" containerID="01b24f583218a0b89c54d8f7500514c61f7923d43d8d5a4aae14d783ac3db137" Nov 25 10:31:20 crc kubenswrapper[4932]: E1125 10:31:20.680182 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01b24f583218a0b89c54d8f7500514c61f7923d43d8d5a4aae14d783ac3db137\": container with ID starting with 01b24f583218a0b89c54d8f7500514c61f7923d43d8d5a4aae14d783ac3db137 not found: ID does not exist" containerID="01b24f583218a0b89c54d8f7500514c61f7923d43d8d5a4aae14d783ac3db137" Nov 25 10:31:20 crc kubenswrapper[4932]: I1125 10:31:20.680226 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01b24f583218a0b89c54d8f7500514c61f7923d43d8d5a4aae14d783ac3db137"} err="failed to get container status \"01b24f583218a0b89c54d8f7500514c61f7923d43d8d5a4aae14d783ac3db137\": rpc error: code = NotFound desc = could not find container \"01b24f583218a0b89c54d8f7500514c61f7923d43d8d5a4aae14d783ac3db137\": container with ID starting with 01b24f583218a0b89c54d8f7500514c61f7923d43d8d5a4aae14d783ac3db137 not found: ID does not exist" Nov 25 10:31:22 crc kubenswrapper[4932]: I1125 10:31:22.617163 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" path="/var/lib/kubelet/pods/9d367b12-a287-44ee-b215-7bd62c33048a/volumes" Nov 25 10:31:23 crc kubenswrapper[4932]: I1125 10:31:23.992200 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-7kzmd"] Nov 25 10:31:23 crc kubenswrapper[4932]: I1125 10:31:23.993491 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-image-upload-5955f5554b-7kzmd" podUID="95cbafac-1b86-480a-b491-7e19df31f063" containerName="octavia-amphora-httpd" containerID="cri-o://030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8" gracePeriod=30 Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.536525 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-5955f5554b-7kzmd" Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.627123 4932 generic.go:334] "Generic (PLEG): container finished" podID="95cbafac-1b86-480a-b491-7e19df31f063" containerID="030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8" exitCode=0 Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.627230 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-7kzmd" event={"ID":"95cbafac-1b86-480a-b491-7e19df31f063","Type":"ContainerDied","Data":"030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8"} Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.627269 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-7kzmd" event={"ID":"95cbafac-1b86-480a-b491-7e19df31f063","Type":"ContainerDied","Data":"17fb94a5ea9fb3ce7dc658e490b9eda37a95ee1299129caee6efcc15ad4b4942"} Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.627291 4932 scope.go:117] "RemoveContainer" containerID="030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8" Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.627289 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-5955f5554b-7kzmd" Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.655942 4932 scope.go:117] "RemoveContainer" containerID="6d4f8b5f038068aa082468d6248f81fda8755d5dd4914524f98e4c3a8e6c7250" Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.680817 4932 scope.go:117] "RemoveContainer" containerID="030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8" Nov 25 10:31:24 crc kubenswrapper[4932]: E1125 10:31:24.681160 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8\": container with ID starting with 030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8 not found: ID does not exist" containerID="030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8" Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.681206 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8"} err="failed to get container status \"030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8\": rpc error: code = NotFound desc = could not find container \"030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8\": container with ID starting with 030c8a7087f71eb26e0339024420fa8c944b84af503750dc8206400a31dcc7b8 not found: ID does not exist" Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.681242 4932 scope.go:117] "RemoveContainer" containerID="6d4f8b5f038068aa082468d6248f81fda8755d5dd4914524f98e4c3a8e6c7250" Nov 25 10:31:24 crc kubenswrapper[4932]: E1125 10:31:24.681604 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d4f8b5f038068aa082468d6248f81fda8755d5dd4914524f98e4c3a8e6c7250\": container with ID starting with 6d4f8b5f038068aa082468d6248f81fda8755d5dd4914524f98e4c3a8e6c7250 not found: ID does not exist" containerID="6d4f8b5f038068aa082468d6248f81fda8755d5dd4914524f98e4c3a8e6c7250" Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.681628 4932 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"6d4f8b5f038068aa082468d6248f81fda8755d5dd4914524f98e4c3a8e6c7250"} err="failed to get container status \"6d4f8b5f038068aa082468d6248f81fda8755d5dd4914524f98e4c3a8e6c7250\": rpc error: code = NotFound desc = could not find container \"6d4f8b5f038068aa082468d6248f81fda8755d5dd4914524f98e4c3a8e6c7250\": container with ID starting with 6d4f8b5f038068aa082468d6248f81fda8755d5dd4914524f98e4c3a8e6c7250 not found: ID does not exist" Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.697055 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/95cbafac-1b86-480a-b491-7e19df31f063-httpd-config\") pod \"95cbafac-1b86-480a-b491-7e19df31f063\" (UID: \"95cbafac-1b86-480a-b491-7e19df31f063\") " Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.697335 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/95cbafac-1b86-480a-b491-7e19df31f063-amphora-image\") pod \"95cbafac-1b86-480a-b491-7e19df31f063\" (UID: \"95cbafac-1b86-480a-b491-7e19df31f063\") " Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.732493 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/95cbafac-1b86-480a-b491-7e19df31f063-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "95cbafac-1b86-480a-b491-7e19df31f063" (UID: "95cbafac-1b86-480a-b491-7e19df31f063"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.785325 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/95cbafac-1b86-480a-b491-7e19df31f063-amphora-image" (OuterVolumeSpecName: "amphora-image") pod "95cbafac-1b86-480a-b491-7e19df31f063" (UID: "95cbafac-1b86-480a-b491-7e19df31f063"). InnerVolumeSpecName "amphora-image". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.801663 4932 reconciler_common.go:293] "Volume detached for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/95cbafac-1b86-480a-b491-7e19df31f063-amphora-image\") on node \"crc\" DevicePath \"\"" Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.801706 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/95cbafac-1b86-480a-b491-7e19df31f063-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.957091 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-7kzmd"] Nov 25 10:31:24 crc kubenswrapper[4932]: I1125 10:31:24.965034 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-7kzmd"] Nov 25 10:31:26 crc kubenswrapper[4932]: I1125 10:31:26.618631 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="95cbafac-1b86-480a-b491-7e19df31f063" path="/var/lib/kubelet/pods/95cbafac-1b86-480a-b491-7e19df31f063/volumes" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.032386 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-5955f5554b-nkglg"] Nov 25 10:31:29 crc kubenswrapper[4932]: E1125 10:31:29.033057 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" containerName="octavia-api" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.033071 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" containerName="octavia-api" Nov 25 10:31:29 crc kubenswrapper[4932]: E1125 10:31:29.033083 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95cbafac-1b86-480a-b491-7e19df31f063" containerName="octavia-amphora-httpd" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.033089 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="95cbafac-1b86-480a-b491-7e19df31f063" containerName="octavia-amphora-httpd" Nov 25 10:31:29 crc kubenswrapper[4932]: E1125 10:31:29.033102 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" containerName="octavia-api-provider-agent" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.033109 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" containerName="octavia-api-provider-agent" Nov 25 10:31:29 crc kubenswrapper[4932]: E1125 10:31:29.033126 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" containerName="init" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.033132 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" containerName="init" Nov 25 10:31:29 crc kubenswrapper[4932]: E1125 10:31:29.033147 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="95cbafac-1b86-480a-b491-7e19df31f063" containerName="init" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.033152 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="95cbafac-1b86-480a-b491-7e19df31f063" containerName="init" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.033349 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" containerName="octavia-api" Nov 25 10:31:29 crc 
kubenswrapper[4932]: I1125 10:31:29.033365 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="95cbafac-1b86-480a-b491-7e19df31f063" containerName="octavia-amphora-httpd" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.033376 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d367b12-a287-44ee-b215-7bd62c33048a" containerName="octavia-api-provider-agent" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.034364 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-5955f5554b-nkglg" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.038336 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.042872 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-nkglg"] Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.178450 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/827f04dc-803b-4755-a121-70f598330ac5-amphora-image\") pod \"octavia-image-upload-5955f5554b-nkglg\" (UID: \"827f04dc-803b-4755-a121-70f598330ac5\") " pod="openstack/octavia-image-upload-5955f5554b-nkglg" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.178564 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/827f04dc-803b-4755-a121-70f598330ac5-httpd-config\") pod \"octavia-image-upload-5955f5554b-nkglg\" (UID: \"827f04dc-803b-4755-a121-70f598330ac5\") " pod="openstack/octavia-image-upload-5955f5554b-nkglg" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.280493 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/827f04dc-803b-4755-a121-70f598330ac5-amphora-image\") pod \"octavia-image-upload-5955f5554b-nkglg\" (UID: \"827f04dc-803b-4755-a121-70f598330ac5\") " pod="openstack/octavia-image-upload-5955f5554b-nkglg" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.280613 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/827f04dc-803b-4755-a121-70f598330ac5-httpd-config\") pod \"octavia-image-upload-5955f5554b-nkglg\" (UID: \"827f04dc-803b-4755-a121-70f598330ac5\") " pod="openstack/octavia-image-upload-5955f5554b-nkglg" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.282183 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/827f04dc-803b-4755-a121-70f598330ac5-amphora-image\") pod \"octavia-image-upload-5955f5554b-nkglg\" (UID: \"827f04dc-803b-4755-a121-70f598330ac5\") " pod="openstack/octavia-image-upload-5955f5554b-nkglg" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.287381 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/827f04dc-803b-4755-a121-70f598330ac5-httpd-config\") pod \"octavia-image-upload-5955f5554b-nkglg\" (UID: \"827f04dc-803b-4755-a121-70f598330ac5\") " pod="openstack/octavia-image-upload-5955f5554b-nkglg" Nov 25 10:31:29 crc kubenswrapper[4932]: I1125 10:31:29.365536 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-5955f5554b-nkglg" Nov 25 10:31:30 crc kubenswrapper[4932]: I1125 10:31:29.973208 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-nkglg"] Nov 25 10:31:30 crc kubenswrapper[4932]: I1125 10:31:30.690533 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-nkglg" event={"ID":"827f04dc-803b-4755-a121-70f598330ac5","Type":"ContainerStarted","Data":"f85580968814be3aaa7e53d2bd12298c2b7edfd74fc90ef9fb09e9dc4a6c4c07"} Nov 25 10:31:30 crc kubenswrapper[4932]: I1125 10:31:30.691383 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-nkglg" event={"ID":"827f04dc-803b-4755-a121-70f598330ac5","Type":"ContainerStarted","Data":"d54c91461c5d9a873086a6a4c99027758b9dcc35352ebe6972bfb48586d7b636"} Nov 25 10:31:32 crc kubenswrapper[4932]: I1125 10:31:32.713458 4932 generic.go:334] "Generic (PLEG): container finished" podID="827f04dc-803b-4755-a121-70f598330ac5" containerID="f85580968814be3aaa7e53d2bd12298c2b7edfd74fc90ef9fb09e9dc4a6c4c07" exitCode=0 Nov 25 10:31:32 crc kubenswrapper[4932]: I1125 10:31:32.713765 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-nkglg" event={"ID":"827f04dc-803b-4755-a121-70f598330ac5","Type":"ContainerDied","Data":"f85580968814be3aaa7e53d2bd12298c2b7edfd74fc90ef9fb09e9dc4a6c4c07"} Nov 25 10:31:33 crc kubenswrapper[4932]: I1125 10:31:33.723574 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-nkglg" event={"ID":"827f04dc-803b-4755-a121-70f598330ac5","Type":"ContainerStarted","Data":"2ab87a4342c830dcc3dff7632786a877610f413f50998fb3f7e9d77bfe2c3826"} Nov 25 10:31:37 crc kubenswrapper[4932]: I1125 10:31:37.181417 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:31:37 crc kubenswrapper[4932]: I1125 10:31:37.181938 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.862501 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-5955f5554b-nkglg" podStartSLOduration=17.433235468 podStartE2EDuration="17.862479649s" podCreationTimestamp="2025-11-25 10:31:29 +0000 UTC" firstStartedPulling="2025-11-25 10:31:29.982982299 +0000 UTC m=+6150.109011862" lastFinishedPulling="2025-11-25 10:31:30.41222648 +0000 UTC m=+6150.538256043" observedRunningTime="2025-11-25 10:31:33.751558124 +0000 UTC m=+6153.877587697" watchObservedRunningTime="2025-11-25 10:31:46.862479649 +0000 UTC m=+6166.988509212" Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.868648 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-healthmanager-j2rkk"] Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.870353 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.872796 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-certs-secret" Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.872969 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-scripts" Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.874732 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-config-data" Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.887939 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-j2rkk"] Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.928163 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-amphora-certs\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.928247 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-scripts\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.928321 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-config-data\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.928340 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-combined-ca-bundle\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.928397 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-hm-ports\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:46 crc kubenswrapper[4932]: I1125 10:31:46.928426 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-config-data-merged\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.030386 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-hm-ports\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc 
kubenswrapper[4932]: I1125 10:31:47.030442 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-config-data-merged\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.030477 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-amphora-certs\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.030521 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-scripts\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.030589 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-config-data\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.030607 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-combined-ca-bundle\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.031760 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-config-data-merged\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.032282 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-hm-ports\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.036873 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-config-data\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.042023 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-combined-ca-bundle\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.042090 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-scripts\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.042144 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/2a3ba277-df30-4ee9-a848-79d1a41aa9d0-amphora-certs\") pod \"octavia-healthmanager-j2rkk\" (UID: \"2a3ba277-df30-4ee9-a848-79d1a41aa9d0\") " pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.202448 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.716154 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-j2rkk"] Nov 25 10:31:47 crc kubenswrapper[4932]: I1125 10:31:47.846954 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-j2rkk" event={"ID":"2a3ba277-df30-4ee9-a848-79d1a41aa9d0","Type":"ContainerStarted","Data":"45b0f1e2754a0f71900a8bac9d2d4f89919d244aab9d0aef7cfdc70122ca8ed8"} Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.307183 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-housekeeping-j6js4"] Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.309267 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.311583 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-config-data" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.312284 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-scripts" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.328581 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-j6js4"] Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.472998 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da74074d-21bc-4f9e-85b2-020df545acae-scripts\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.473070 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/da74074d-21bc-4f9e-85b2-020df545acae-amphora-certs\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.473117 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/da74074d-21bc-4f9e-85b2-020df545acae-hm-ports\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.473154 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: 
\"kubernetes.io/empty-dir/da74074d-21bc-4f9e-85b2-020df545acae-config-data-merged\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.473255 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da74074d-21bc-4f9e-85b2-020df545acae-config-data\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.473285 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da74074d-21bc-4f9e-85b2-020df545acae-combined-ca-bundle\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.576000 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da74074d-21bc-4f9e-85b2-020df545acae-scripts\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.576057 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/da74074d-21bc-4f9e-85b2-020df545acae-amphora-certs\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.576096 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/da74074d-21bc-4f9e-85b2-020df545acae-hm-ports\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.576132 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/da74074d-21bc-4f9e-85b2-020df545acae-config-data-merged\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.576158 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da74074d-21bc-4f9e-85b2-020df545acae-config-data\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.576179 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da74074d-21bc-4f9e-85b2-020df545acae-combined-ca-bundle\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.576902 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/da74074d-21bc-4f9e-85b2-020df545acae-config-data-merged\") 
pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.577450 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/da74074d-21bc-4f9e-85b2-020df545acae-hm-ports\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.582107 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/da74074d-21bc-4f9e-85b2-020df545acae-amphora-certs\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.582246 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da74074d-21bc-4f9e-85b2-020df545acae-combined-ca-bundle\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.583832 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/da74074d-21bc-4f9e-85b2-020df545acae-scripts\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.588263 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/da74074d-21bc-4f9e-85b2-020df545acae-config-data\") pod \"octavia-housekeeping-j6js4\" (UID: \"da74074d-21bc-4f9e-85b2-020df545acae\") " pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.627733 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:48 crc kubenswrapper[4932]: I1125 10:31:48.861737 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-j2rkk" event={"ID":"2a3ba277-df30-4ee9-a848-79d1a41aa9d0","Type":"ContainerStarted","Data":"55301bb02b0fd5cb9f47da10d6ca55aa43a2c7f1c1e3341d1a5dd5085e2ca91c"} Nov 25 10:31:49 crc kubenswrapper[4932]: W1125 10:31:49.119290 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda74074d_21bc_4f9e_85b2_020df545acae.slice/crio-9b38ebee84711bd7e7d5a2a1a9a88f07e8e7dfa96fa038af5136957a3b8fd435 WatchSource:0}: Error finding container 9b38ebee84711bd7e7d5a2a1a9a88f07e8e7dfa96fa038af5136957a3b8fd435: Status 404 returned error can't find the container with id 9b38ebee84711bd7e7d5a2a1a9a88f07e8e7dfa96fa038af5136957a3b8fd435 Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.125214 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-j6js4"] Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.432450 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-worker-plzc4"] Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.435557 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.437572 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-scripts" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.439642 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-config-data" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.442972 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-plzc4"] Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.595172 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/636ea10c-f674-413e-9f68-5ab25c09a5fa-amphora-certs\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.595284 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/636ea10c-f674-413e-9f68-5ab25c09a5fa-config-data\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.595322 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/636ea10c-f674-413e-9f68-5ab25c09a5fa-scripts\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.595497 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/636ea10c-f674-413e-9f68-5ab25c09a5fa-hm-ports\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.595710 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/636ea10c-f674-413e-9f68-5ab25c09a5fa-combined-ca-bundle\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.595776 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/636ea10c-f674-413e-9f68-5ab25c09a5fa-config-data-merged\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.698202 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/636ea10c-f674-413e-9f68-5ab25c09a5fa-combined-ca-bundle\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.698266 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/636ea10c-f674-413e-9f68-5ab25c09a5fa-config-data-merged\") pod \"octavia-worker-plzc4\" 
(UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.698418 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/636ea10c-f674-413e-9f68-5ab25c09a5fa-amphora-certs\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.698467 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/636ea10c-f674-413e-9f68-5ab25c09a5fa-config-data\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.698492 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/636ea10c-f674-413e-9f68-5ab25c09a5fa-scripts\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.698545 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/636ea10c-f674-413e-9f68-5ab25c09a5fa-hm-ports\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.699966 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/636ea10c-f674-413e-9f68-5ab25c09a5fa-hm-ports\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.700105 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/636ea10c-f674-413e-9f68-5ab25c09a5fa-config-data-merged\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.705001 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/636ea10c-f674-413e-9f68-5ab25c09a5fa-combined-ca-bundle\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.705347 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/636ea10c-f674-413e-9f68-5ab25c09a5fa-config-data\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.705860 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/636ea10c-f674-413e-9f68-5ab25c09a5fa-amphora-certs\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.706652 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/636ea10c-f674-413e-9f68-5ab25c09a5fa-scripts\") pod \"octavia-worker-plzc4\" (UID: \"636ea10c-f674-413e-9f68-5ab25c09a5fa\") " pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.767736 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-plzc4" Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.885858 4932 generic.go:334] "Generic (PLEG): container finished" podID="2a3ba277-df30-4ee9-a848-79d1a41aa9d0" containerID="55301bb02b0fd5cb9f47da10d6ca55aa43a2c7f1c1e3341d1a5dd5085e2ca91c" exitCode=0 Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.885938 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-j2rkk" event={"ID":"2a3ba277-df30-4ee9-a848-79d1a41aa9d0","Type":"ContainerDied","Data":"55301bb02b0fd5cb9f47da10d6ca55aa43a2c7f1c1e3341d1a5dd5085e2ca91c"} Nov 25 10:31:49 crc kubenswrapper[4932]: I1125 10:31:49.888358 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-j6js4" event={"ID":"da74074d-21bc-4f9e-85b2-020df545acae","Type":"ContainerStarted","Data":"9b38ebee84711bd7e7d5a2a1a9a88f07e8e7dfa96fa038af5136957a3b8fd435"} Nov 25 10:31:50 crc kubenswrapper[4932]: I1125 10:31:50.303041 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-plzc4"] Nov 25 10:31:50 crc kubenswrapper[4932]: W1125 10:31:50.447818 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod636ea10c_f674_413e_9f68_5ab25c09a5fa.slice/crio-c4311d0495e564745c87562837ab37069fdc5131496b2696444fa07c0e417ce2 WatchSource:0}: Error finding container c4311d0495e564745c87562837ab37069fdc5131496b2696444fa07c0e417ce2: Status 404 returned error can't find the container with id c4311d0495e564745c87562837ab37069fdc5131496b2696444fa07c0e417ce2 Nov 25 10:31:50 crc kubenswrapper[4932]: I1125 10:31:50.905251 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-plzc4" event={"ID":"636ea10c-f674-413e-9f68-5ab25c09a5fa","Type":"ContainerStarted","Data":"c4311d0495e564745c87562837ab37069fdc5131496b2696444fa07c0e417ce2"} Nov 25 10:31:50 crc kubenswrapper[4932]: I1125 10:31:50.907603 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-j2rkk" event={"ID":"2a3ba277-df30-4ee9-a848-79d1a41aa9d0","Type":"ContainerStarted","Data":"52c8f60c09efe89e78221ec7ff7e036b77d7dbced82c655ed0956bcea5ff6afc"} Nov 25 10:31:50 crc kubenswrapper[4932]: I1125 10:31:50.909074 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:31:50 crc kubenswrapper[4932]: I1125 10:31:50.933937 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-healthmanager-j2rkk" podStartSLOduration=4.93392054 podStartE2EDuration="4.93392054s" podCreationTimestamp="2025-11-25 10:31:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:31:50.926656022 +0000 UTC m=+6171.052685585" watchObservedRunningTime="2025-11-25 10:31:50.93392054 +0000 UTC m=+6171.059950103" Nov 25 10:31:51 crc kubenswrapper[4932]: I1125 10:31:51.919285 4932 generic.go:334] "Generic (PLEG): container finished" podID="da74074d-21bc-4f9e-85b2-020df545acae" containerID="217ddbe166e9cfd82c4bda2b52c4e06e5fa67f8c67095fc8506bc74d438b5093" 
exitCode=0 Nov 25 10:31:51 crc kubenswrapper[4932]: I1125 10:31:51.919333 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-j6js4" event={"ID":"da74074d-21bc-4f9e-85b2-020df545acae","Type":"ContainerDied","Data":"217ddbe166e9cfd82c4bda2b52c4e06e5fa67f8c67095fc8506bc74d438b5093"} Nov 25 10:31:52 crc kubenswrapper[4932]: I1125 10:31:52.931445 4932 generic.go:334] "Generic (PLEG): container finished" podID="636ea10c-f674-413e-9f68-5ab25c09a5fa" containerID="56c6e905e3c549958bd217b875ed8f2d8fc793ba9ab65a91e20df0118423d658" exitCode=0 Nov 25 10:31:52 crc kubenswrapper[4932]: I1125 10:31:52.931546 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-plzc4" event={"ID":"636ea10c-f674-413e-9f68-5ab25c09a5fa","Type":"ContainerDied","Data":"56c6e905e3c549958bd217b875ed8f2d8fc793ba9ab65a91e20df0118423d658"} Nov 25 10:31:52 crc kubenswrapper[4932]: I1125 10:31:52.934130 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-j6js4" event={"ID":"da74074d-21bc-4f9e-85b2-020df545acae","Type":"ContainerStarted","Data":"e63fe9fa444c1bc5e4be6cbd6183ad81d0701b621a834f32881d38f8d5f280ef"} Nov 25 10:31:52 crc kubenswrapper[4932]: I1125 10:31:52.976766 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-housekeeping-j6js4" podStartSLOduration=3.591213115 podStartE2EDuration="4.976748098s" podCreationTimestamp="2025-11-25 10:31:48 +0000 UTC" firstStartedPulling="2025-11-25 10:31:49.122508292 +0000 UTC m=+6169.248537855" lastFinishedPulling="2025-11-25 10:31:50.508043255 +0000 UTC m=+6170.634072838" observedRunningTime="2025-11-25 10:31:52.974848874 +0000 UTC m=+6173.100878437" watchObservedRunningTime="2025-11-25 10:31:52.976748098 +0000 UTC m=+6173.102777661" Nov 25 10:31:53 crc kubenswrapper[4932]: I1125 10:31:53.946386 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-plzc4" event={"ID":"636ea10c-f674-413e-9f68-5ab25c09a5fa","Type":"ContainerStarted","Data":"53fb9e9b3425613b2300a71b0510c07dedb8737491563225dd0d94da8d446f3f"} Nov 25 10:31:53 crc kubenswrapper[4932]: I1125 10:31:53.949298 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:31:53 crc kubenswrapper[4932]: I1125 10:31:53.949319 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-worker-plzc4" Nov 25 10:31:53 crc kubenswrapper[4932]: I1125 10:31:53.980421 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-worker-plzc4" podStartSLOduration=3.843198047 podStartE2EDuration="4.980404075s" podCreationTimestamp="2025-11-25 10:31:49 +0000 UTC" firstStartedPulling="2025-11-25 10:31:50.453908786 +0000 UTC m=+6170.579938349" lastFinishedPulling="2025-11-25 10:31:51.591114814 +0000 UTC m=+6171.717144377" observedRunningTime="2025-11-25 10:31:53.977293496 +0000 UTC m=+6174.103323079" watchObservedRunningTime="2025-11-25 10:31:53.980404075 +0000 UTC m=+6174.106433638" Nov 25 10:32:02 crc kubenswrapper[4932]: I1125 10:32:02.231629 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-healthmanager-j2rkk" Nov 25 10:32:03 crc kubenswrapper[4932]: I1125 10:32:03.660022 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-housekeeping-j6js4" Nov 25 10:32:04 crc kubenswrapper[4932]: I1125 10:32:04.808565 4932 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/octavia-worker-plzc4" Nov 25 10:32:07 crc kubenswrapper[4932]: I1125 10:32:07.180812 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:32:07 crc kubenswrapper[4932]: I1125 10:32:07.181410 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:32:21 crc kubenswrapper[4932]: I1125 10:32:21.041781 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-j7mmg"] Nov 25 10:32:21 crc kubenswrapper[4932]: I1125 10:32:21.054397 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-a602-account-create-qjgw7"] Nov 25 10:32:21 crc kubenswrapper[4932]: I1125 10:32:21.065879 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-j7mmg"] Nov 25 10:32:21 crc kubenswrapper[4932]: I1125 10:32:21.075549 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-a602-account-create-qjgw7"] Nov 25 10:32:22 crc kubenswrapper[4932]: I1125 10:32:22.618541 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11d0a49f-4bc8-4c66-9789-02c65071777f" path="/var/lib/kubelet/pods/11d0a49f-4bc8-4c66-9789-02c65071777f/volumes" Nov 25 10:32:22 crc kubenswrapper[4932]: I1125 10:32:22.620555 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3804b75a-8646-4475-8abb-a6f1ffe8a589" path="/var/lib/kubelet/pods/3804b75a-8646-4475-8abb-a6f1ffe8a589/volumes" Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.392959 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jpwsf"] Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.395987 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpwsf"
Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.403524 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpwsf"]
Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.486022 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/858081f6-a28d-4376-915f-a0ff8a39f723-utilities\") pod \"redhat-marketplace-jpwsf\" (UID: \"858081f6-a28d-4376-915f-a0ff8a39f723\") " pod="openshift-marketplace/redhat-marketplace-jpwsf"
Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.486118 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plxx5\" (UniqueName: \"kubernetes.io/projected/858081f6-a28d-4376-915f-a0ff8a39f723-kube-api-access-plxx5\") pod \"redhat-marketplace-jpwsf\" (UID: \"858081f6-a28d-4376-915f-a0ff8a39f723\") " pod="openshift-marketplace/redhat-marketplace-jpwsf"
Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.486278 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/858081f6-a28d-4376-915f-a0ff8a39f723-catalog-content\") pod \"redhat-marketplace-jpwsf\" (UID: \"858081f6-a28d-4376-915f-a0ff8a39f723\") " pod="openshift-marketplace/redhat-marketplace-jpwsf"
Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.588547 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/858081f6-a28d-4376-915f-a0ff8a39f723-utilities\") pod \"redhat-marketplace-jpwsf\" (UID: \"858081f6-a28d-4376-915f-a0ff8a39f723\") " pod="openshift-marketplace/redhat-marketplace-jpwsf"
Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.588637 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plxx5\" (UniqueName: \"kubernetes.io/projected/858081f6-a28d-4376-915f-a0ff8a39f723-kube-api-access-plxx5\") pod \"redhat-marketplace-jpwsf\" (UID: \"858081f6-a28d-4376-915f-a0ff8a39f723\") " pod="openshift-marketplace/redhat-marketplace-jpwsf"
Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.588695 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/858081f6-a28d-4376-915f-a0ff8a39f723-catalog-content\") pod \"redhat-marketplace-jpwsf\" (UID: \"858081f6-a28d-4376-915f-a0ff8a39f723\") " pod="openshift-marketplace/redhat-marketplace-jpwsf"
Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.589278 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/858081f6-a28d-4376-915f-a0ff8a39f723-utilities\") pod \"redhat-marketplace-jpwsf\" (UID: \"858081f6-a28d-4376-915f-a0ff8a39f723\") " pod="openshift-marketplace/redhat-marketplace-jpwsf"
Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.589343 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/858081f6-a28d-4376-915f-a0ff8a39f723-catalog-content\") pod \"redhat-marketplace-jpwsf\" (UID: \"858081f6-a28d-4376-915f-a0ff8a39f723\") " pod="openshift-marketplace/redhat-marketplace-jpwsf"
Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.608062 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plxx5\" (UniqueName: \"kubernetes.io/projected/858081f6-a28d-4376-915f-a0ff8a39f723-kube-api-access-plxx5\") pod \"redhat-marketplace-jpwsf\" (UID: \"858081f6-a28d-4376-915f-a0ff8a39f723\") " pod="openshift-marketplace/redhat-marketplace-jpwsf"
Nov 25 10:32:33 crc kubenswrapper[4932]: I1125 10:32:33.716460 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpwsf"
Nov 25 10:32:34 crc kubenswrapper[4932]: I1125 10:32:34.180293 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpwsf"]
Nov 25 10:32:34 crc kubenswrapper[4932]: I1125 10:32:34.466687 4932 generic.go:334] "Generic (PLEG): container finished" podID="858081f6-a28d-4376-915f-a0ff8a39f723" containerID="5a97cc40c7103c0e8408c6706772d983763d50532608ebfecb816f5b15155f2f" exitCode=0
Nov 25 10:32:34 crc kubenswrapper[4932]: I1125 10:32:34.468787 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpwsf" event={"ID":"858081f6-a28d-4376-915f-a0ff8a39f723","Type":"ContainerDied","Data":"5a97cc40c7103c0e8408c6706772d983763d50532608ebfecb816f5b15155f2f"}
Nov 25 10:32:34 crc kubenswrapper[4932]: I1125 10:32:34.468852 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpwsf" event={"ID":"858081f6-a28d-4376-915f-a0ff8a39f723","Type":"ContainerStarted","Data":"f8f423eff0c53bc0f7dbaf5c54cc0f4875e63c0c47e21eb200e105a1579ea32b"}
Nov 25 10:32:35 crc kubenswrapper[4932]: I1125 10:32:35.480024 4932 generic.go:334] "Generic (PLEG): container finished" podID="858081f6-a28d-4376-915f-a0ff8a39f723" containerID="61c7abfe1a9442944c5d13af31becade4535fc6ec68a38a7ef235b5d98dfca66" exitCode=0
Nov 25 10:32:35 crc kubenswrapper[4932]: I1125 10:32:35.480120 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpwsf" event={"ID":"858081f6-a28d-4376-915f-a0ff8a39f723","Type":"ContainerDied","Data":"61c7abfe1a9442944c5d13af31becade4535fc6ec68a38a7ef235b5d98dfca66"}
Nov 25 10:32:36 crc kubenswrapper[4932]: I1125 10:32:36.492463 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpwsf" event={"ID":"858081f6-a28d-4376-915f-a0ff8a39f723","Type":"ContainerStarted","Data":"be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d"}
Nov 25 10:32:36 crc kubenswrapper[4932]: I1125 10:32:36.523922 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jpwsf" podStartSLOduration=1.979913799 podStartE2EDuration="3.523904385s" podCreationTimestamp="2025-11-25 10:32:33 +0000 UTC" firstStartedPulling="2025-11-25 10:32:34.469701011 +0000 UTC m=+6214.595730574" lastFinishedPulling="2025-11-25 10:32:36.013691597 +0000 UTC m=+6216.139721160" observedRunningTime="2025-11-25 10:32:36.514696711 +0000 UTC m=+6216.640726274" watchObservedRunningTime="2025-11-25 10:32:36.523904385 +0000 UTC m=+6216.649933948"
Nov 25 10:32:37 crc kubenswrapper[4932]: I1125 10:32:37.180456 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:32:37 crc kubenswrapper[4932]: I1125 10:32:37.180509 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
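The liveness failure above is the kubelet's HTTP prober hitting 127.0.0.1:8798/health and getting connection refused. The daemon's actual manifest is not part of this log; a minimal sketch of how such a probe is declared with the k8s.io/api types, with the period and threshold values assumed, looks like:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Path, host, and port are taken from the probe output above;
	// the timing fields are assumptions, not read from the log.
	probe := &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Host: "127.0.0.1",
				Path: "/health",
				Port: intstr.FromInt(8798),
			},
		},
		PeriodSeconds:    10, // assumed
		FailureThreshold: 3,  // assumed; after this many misses the container is restarted
	}
	fmt.Printf("liveness: GET http://%s:%d%s\n",
		probe.HTTPGet.Host, probe.HTTPGet.Port.IntValue(), probe.HTTPGet.Path)
}

Once the threshold is crossed, the kubelet does exactly what the next entries show: it marks the probe unhealthy, records "will be restarted", and kills the container with the pod's grace period.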
pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:32:37 crc kubenswrapper[4932]: I1125 10:32:37.180550 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 10:32:37 crc kubenswrapper[4932]: I1125 10:32:37.181021 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d3ec26a09840ae0db21e9656db7082103181dfd412ba17c268a25c18af62eb7f"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:32:37 crc kubenswrapper[4932]: I1125 10:32:37.181072 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://d3ec26a09840ae0db21e9656db7082103181dfd412ba17c268a25c18af62eb7f" gracePeriod=600 Nov 25 10:32:37 crc kubenswrapper[4932]: I1125 10:32:37.502702 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="d3ec26a09840ae0db21e9656db7082103181dfd412ba17c268a25c18af62eb7f" exitCode=0 Nov 25 10:32:37 crc kubenswrapper[4932]: I1125 10:32:37.503365 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"d3ec26a09840ae0db21e9656db7082103181dfd412ba17c268a25c18af62eb7f"} Nov 25 10:32:37 crc kubenswrapper[4932]: I1125 10:32:37.503427 4932 scope.go:117] "RemoveContainer" containerID="e3665e709d397218184b4d062190d8230448d00c9d1ac39d89e8d275ac2342e2" Nov 25 10:32:38 crc kubenswrapper[4932]: I1125 10:32:38.515419 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"} Nov 25 10:32:40 crc kubenswrapper[4932]: I1125 10:32:40.030373 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-n9d9q"] Nov 25 10:32:40 crc kubenswrapper[4932]: I1125 10:32:40.042301 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-n9d9q"] Nov 25 10:32:40 crc kubenswrapper[4932]: I1125 10:32:40.618736 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa464407-8a3b-4d8f-b2ff-9b24a8a523bd" path="/var/lib/kubelet/pods/fa464407-8a3b-4d8f-b2ff-9b24a8a523bd/volumes" Nov 25 10:32:43 crc kubenswrapper[4932]: I1125 10:32:43.717083 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jpwsf" Nov 25 10:32:43 crc kubenswrapper[4932]: I1125 10:32:43.717590 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jpwsf" Nov 25 10:32:43 crc kubenswrapper[4932]: I1125 10:32:43.765694 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jpwsf" Nov 
25 10:32:44 crc kubenswrapper[4932]: I1125 10:32:44.630406 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jpwsf" Nov 25 10:32:44 crc kubenswrapper[4932]: I1125 10:32:44.694698 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpwsf"] Nov 25 10:32:46 crc kubenswrapper[4932]: I1125 10:32:46.595737 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jpwsf" podUID="858081f6-a28d-4376-915f-a0ff8a39f723" containerName="registry-server" containerID="cri-o://be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d" gracePeriod=2 Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.134377 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpwsf" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.285324 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/858081f6-a28d-4376-915f-a0ff8a39f723-catalog-content\") pod \"858081f6-a28d-4376-915f-a0ff8a39f723\" (UID: \"858081f6-a28d-4376-915f-a0ff8a39f723\") " Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.285794 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-plxx5\" (UniqueName: \"kubernetes.io/projected/858081f6-a28d-4376-915f-a0ff8a39f723-kube-api-access-plxx5\") pod \"858081f6-a28d-4376-915f-a0ff8a39f723\" (UID: \"858081f6-a28d-4376-915f-a0ff8a39f723\") " Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.285891 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/858081f6-a28d-4376-915f-a0ff8a39f723-utilities\") pod \"858081f6-a28d-4376-915f-a0ff8a39f723\" (UID: \"858081f6-a28d-4376-915f-a0ff8a39f723\") " Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.287757 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/858081f6-a28d-4376-915f-a0ff8a39f723-utilities" (OuterVolumeSpecName: "utilities") pod "858081f6-a28d-4376-915f-a0ff8a39f723" (UID: "858081f6-a28d-4376-915f-a0ff8a39f723"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.305575 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/858081f6-a28d-4376-915f-a0ff8a39f723-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "858081f6-a28d-4376-915f-a0ff8a39f723" (UID: "858081f6-a28d-4376-915f-a0ff8a39f723"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.312702 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/858081f6-a28d-4376-915f-a0ff8a39f723-kube-api-access-plxx5" (OuterVolumeSpecName: "kube-api-access-plxx5") pod "858081f6-a28d-4376-915f-a0ff8a39f723" (UID: "858081f6-a28d-4376-915f-a0ff8a39f723"). InnerVolumeSpecName "kube-api-access-plxx5". 
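Both kills above honor a grace period: 600s for the machine-config daemon restarted after its failed liveness probe, 2s for the marketplace pod deleted through the API. The grace period can come from the pod spec or be set on the delete request itself; a minimal client-go sketch of the latter, with the kubeconfig path as a placeholder:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig path; in-cluster config would also work.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// Matches the gracePeriod=2 the kubelet logged for the marketplace pod.
	grace := int64(2)
	err = cs.CoreV1().Pods("openshift-marketplace").Delete(
		context.TODO(),
		"redhat-marketplace-jpwsf",
		metav1.DeleteOptions{GracePeriodSeconds: &grace},
	)
	if err != nil {
		panic(err)
	}
}

After the grace period expires the runtime sends SIGKILL; within it, the container gets SIGTERM and may exit cleanly, which is what the ContainerDied events that follow record.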
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.389642 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/858081f6-a28d-4376-915f-a0ff8a39f723-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.389688 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-plxx5\" (UniqueName: \"kubernetes.io/projected/858081f6-a28d-4376-915f-a0ff8a39f723-kube-api-access-plxx5\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.389699 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/858081f6-a28d-4376-915f-a0ff8a39f723-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.609989 4932 generic.go:334] "Generic (PLEG): container finished" podID="858081f6-a28d-4376-915f-a0ff8a39f723" containerID="be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d" exitCode=0 Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.610033 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpwsf" event={"ID":"858081f6-a28d-4376-915f-a0ff8a39f723","Type":"ContainerDied","Data":"be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d"} Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.610041 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jpwsf" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.610067 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jpwsf" event={"ID":"858081f6-a28d-4376-915f-a0ff8a39f723","Type":"ContainerDied","Data":"f8f423eff0c53bc0f7dbaf5c54cc0f4875e63c0c47e21eb200e105a1579ea32b"} Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.610088 4932 scope.go:117] "RemoveContainer" containerID="be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.632017 4932 scope.go:117] "RemoveContainer" containerID="61c7abfe1a9442944c5d13af31becade4535fc6ec68a38a7ef235b5d98dfca66" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.654369 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpwsf"] Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.676012 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jpwsf"] Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.680537 4932 scope.go:117] "RemoveContainer" containerID="5a97cc40c7103c0e8408c6706772d983763d50532608ebfecb816f5b15155f2f" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.718158 4932 scope.go:117] "RemoveContainer" containerID="be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d" Nov 25 10:32:47 crc kubenswrapper[4932]: E1125 10:32:47.719596 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d\": container with ID starting with be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d not found: ID does not exist" containerID="be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.719668 4932 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d"} err="failed to get container status \"be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d\": rpc error: code = NotFound desc = could not find container \"be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d\": container with ID starting with be20980b389feb6f2a9e0e3b0b827882ffb2a520b9f7d4d415413c49764b408d not found: ID does not exist" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.719706 4932 scope.go:117] "RemoveContainer" containerID="61c7abfe1a9442944c5d13af31becade4535fc6ec68a38a7ef235b5d98dfca66" Nov 25 10:32:47 crc kubenswrapper[4932]: E1125 10:32:47.722129 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61c7abfe1a9442944c5d13af31becade4535fc6ec68a38a7ef235b5d98dfca66\": container with ID starting with 61c7abfe1a9442944c5d13af31becade4535fc6ec68a38a7ef235b5d98dfca66 not found: ID does not exist" containerID="61c7abfe1a9442944c5d13af31becade4535fc6ec68a38a7ef235b5d98dfca66" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.722182 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61c7abfe1a9442944c5d13af31becade4535fc6ec68a38a7ef235b5d98dfca66"} err="failed to get container status \"61c7abfe1a9442944c5d13af31becade4535fc6ec68a38a7ef235b5d98dfca66\": rpc error: code = NotFound desc = could not find container \"61c7abfe1a9442944c5d13af31becade4535fc6ec68a38a7ef235b5d98dfca66\": container with ID starting with 61c7abfe1a9442944c5d13af31becade4535fc6ec68a38a7ef235b5d98dfca66 not found: ID does not exist" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.722232 4932 scope.go:117] "RemoveContainer" containerID="5a97cc40c7103c0e8408c6706772d983763d50532608ebfecb816f5b15155f2f" Nov 25 10:32:47 crc kubenswrapper[4932]: E1125 10:32:47.722893 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a97cc40c7103c0e8408c6706772d983763d50532608ebfecb816f5b15155f2f\": container with ID starting with 5a97cc40c7103c0e8408c6706772d983763d50532608ebfecb816f5b15155f2f not found: ID does not exist" containerID="5a97cc40c7103c0e8408c6706772d983763d50532608ebfecb816f5b15155f2f" Nov 25 10:32:47 crc kubenswrapper[4932]: I1125 10:32:47.722924 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a97cc40c7103c0e8408c6706772d983763d50532608ebfecb816f5b15155f2f"} err="failed to get container status \"5a97cc40c7103c0e8408c6706772d983763d50532608ebfecb816f5b15155f2f\": rpc error: code = NotFound desc = could not find container \"5a97cc40c7103c0e8408c6706772d983763d50532608ebfecb816f5b15155f2f\": container with ID starting with 5a97cc40c7103c0e8408c6706772d983763d50532608ebfecb816f5b15155f2f not found: ID does not exist" Nov 25 10:32:48 crc kubenswrapper[4932]: I1125 10:32:48.618742 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="858081f6-a28d-4376-915f-a0ff8a39f723" path="/var/lib/kubelet/pods/858081f6-a28d-4376-915f-a0ff8a39f723/volumes" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.110673 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2cg9k"] Nov 25 10:32:53 crc kubenswrapper[4932]: E1125 10:32:53.111484 4932 cpu_manager.go:410] "RemoveStaleState: removing container" 
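The NotFound errors above are a benign race, not a real failure: a second RemoveContainer pass asks CRI-O for the status of containers the first pass already deleted. The usual way to write such cleanup is to treat gRPC NotFound as "already gone". A short sketch of the pattern (the helper is illustrative, not kubelet code):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeIfPresent wraps a removal that may race with runtime garbage
// collection: gRPC NotFound means the work is already done.
func removeIfPresent(remove func(id string) error, id string) error {
	err := remove(id)
	if err == nil {
		return nil
	}
	if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
		return nil // container already deleted; nothing left to do
	}
	return fmt.Errorf("remove %s: %w", id, err)
}

func main() {
	// Simulate the race seen above: the container is gone by the time the
	// second RemoveContainer call runs.
	gone := func(id string) error {
		return status.Error(codes.NotFound, "could not find container "+id)
	}
	if err := removeIfPresent(gone, "be20980b389f"); err != nil {
		panic(err)
	}
	fmt.Println("NotFound treated as success")
}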
podUID="858081f6-a28d-4376-915f-a0ff8a39f723" containerName="registry-server" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.111495 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="858081f6-a28d-4376-915f-a0ff8a39f723" containerName="registry-server" Nov 25 10:32:53 crc kubenswrapper[4932]: E1125 10:32:53.111512 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="858081f6-a28d-4376-915f-a0ff8a39f723" containerName="extract-utilities" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.111518 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="858081f6-a28d-4376-915f-a0ff8a39f723" containerName="extract-utilities" Nov 25 10:32:53 crc kubenswrapper[4932]: E1125 10:32:53.111536 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="858081f6-a28d-4376-915f-a0ff8a39f723" containerName="extract-content" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.111543 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="858081f6-a28d-4376-915f-a0ff8a39f723" containerName="extract-content" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.111725 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="858081f6-a28d-4376-915f-a0ff8a39f723" containerName="registry-server" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.113148 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.133524 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2cg9k"] Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.207934 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2ncr\" (UniqueName: \"kubernetes.io/projected/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-kube-api-access-q2ncr\") pod \"community-operators-2cg9k\" (UID: \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\") " pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.208104 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-catalog-content\") pod \"community-operators-2cg9k\" (UID: \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\") " pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.208134 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-utilities\") pod \"community-operators-2cg9k\" (UID: \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\") " pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.309523 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-catalog-content\") pod \"community-operators-2cg9k\" (UID: \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\") " pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.309575 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-utilities\") pod 
\"community-operators-2cg9k\" (UID: \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\") " pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.309625 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2ncr\" (UniqueName: \"kubernetes.io/projected/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-kube-api-access-q2ncr\") pod \"community-operators-2cg9k\" (UID: \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\") " pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.310665 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-utilities\") pod \"community-operators-2cg9k\" (UID: \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\") " pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.310666 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-catalog-content\") pod \"community-operators-2cg9k\" (UID: \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\") " pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.330337 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2ncr\" (UniqueName: \"kubernetes.io/projected/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-kube-api-access-q2ncr\") pod \"community-operators-2cg9k\" (UID: \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\") " pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:32:53 crc kubenswrapper[4932]: I1125 10:32:53.447180 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.042028 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2cg9k"] Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.077534 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-84cdfd4f4c-hjhgb"] Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.079269 4932 util.go:30] "No sandbox for pod can be found. 
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.082557 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.082856 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.082992 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-2c5dk"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.083138 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.120738 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-84cdfd4f4c-hjhgb"]
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.153790 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.154079 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f69b7f54-bfa9-45b9-9058-f32978b115aa" containerName="glance-log" containerID="cri-o://0b68ecf65e763f46d73c10cbcafc18a631c5e413211b52149a8c586b64123046" gracePeriod=30
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.154231 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f69b7f54-bfa9-45b9-9058-f32978b115aa" containerName="glance-httpd" containerID="cri-o://cfcb4d993bb53c8cdf9fbd009c8d7c86a8ff41ee936820a577f8fabcdc2e6601" gracePeriod=30
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.197111 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5d7b46cfb9-nfxv4"]
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.201602 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.214109 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.214529 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="568f6a1f-9f3c-4cda-9f7d-f844a40b4909" containerName="glance-log" containerID="cri-o://8b472d52078147695a16b60985f261b472d51b058778a2a54298189701fb2c6a" gracePeriod=30
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.214669 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="568f6a1f-9f3c-4cda-9f7d-f844a40b4909" containerName="glance-httpd" containerID="cri-o://349c60cc55bbe03b4c6543be441fde234a2d3f06f6bf43b4563a1a30f2766417" gracePeriod=30
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.237559 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/98d7cba9-d28a-4470-bfc1-068233ffe16e-horizon-secret-key\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.237949 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/98d7cba9-d28a-4470-bfc1-068233ffe16e-config-data\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.238087 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98d7cba9-d28a-4470-bfc1-068233ffe16e-logs\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.238233 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/98d7cba9-d28a-4470-bfc1-068233ffe16e-scripts\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.238446 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l9ss\" (UniqueName: \"kubernetes.io/projected/98d7cba9-d28a-4470-bfc1-068233ffe16e-kube-api-access-7l9ss\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.255110 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5d7b46cfb9-nfxv4"]
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.339879 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/98d7cba9-d28a-4470-bfc1-068233ffe16e-horizon-secret-key\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.340166 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/472740c4-2af7-463e-b704-391d4e030519-config-data\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.340219 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/472740c4-2af7-463e-b704-391d4e030519-scripts\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.340261 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/98d7cba9-d28a-4470-bfc1-068233ffe16e-config-data\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.340313 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98d7cba9-d28a-4470-bfc1-068233ffe16e-logs\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.340335 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/472740c4-2af7-463e-b704-391d4e030519-logs\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.340360 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/98d7cba9-d28a-4470-bfc1-068233ffe16e-scripts\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.340395 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx2qh\" (UniqueName: \"kubernetes.io/projected/472740c4-2af7-463e-b704-391d4e030519-kube-api-access-mx2qh\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.340438 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/472740c4-2af7-463e-b704-391d4e030519-horizon-secret-key\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.340483 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l9ss\" (UniqueName: \"kubernetes.io/projected/98d7cba9-d28a-4470-bfc1-068233ffe16e-kube-api-access-7l9ss\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.340813 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98d7cba9-d28a-4470-bfc1-068233ffe16e-logs\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.341220 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/98d7cba9-d28a-4470-bfc1-068233ffe16e-scripts\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.341421 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/98d7cba9-d28a-4470-bfc1-068233ffe16e-config-data\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.346948 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/98d7cba9-d28a-4470-bfc1-068233ffe16e-horizon-secret-key\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.356787 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l9ss\" (UniqueName: \"kubernetes.io/projected/98d7cba9-d28a-4470-bfc1-068233ffe16e-kube-api-access-7l9ss\") pod \"horizon-84cdfd4f4c-hjhgb\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.431661 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84cdfd4f4c-hjhgb"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.443741 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/472740c4-2af7-463e-b704-391d4e030519-logs\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.443842 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx2qh\" (UniqueName: \"kubernetes.io/projected/472740c4-2af7-463e-b704-391d4e030519-kube-api-access-mx2qh\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.443936 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/472740c4-2af7-463e-b704-391d4e030519-horizon-secret-key\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.444067 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/472740c4-2af7-463e-b704-391d4e030519-config-data\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.444116 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/472740c4-2af7-463e-b704-391d4e030519-scripts\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.444382 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/472740c4-2af7-463e-b704-391d4e030519-logs\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.445948 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/472740c4-2af7-463e-b704-391d4e030519-scripts\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.447473 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/472740c4-2af7-463e-b704-391d4e030519-config-data\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.449314 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/472740c4-2af7-463e-b704-391d4e030519-horizon-secret-key\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.461421 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx2qh\" (UniqueName: \"kubernetes.io/projected/472740c4-2af7-463e-b704-391d4e030519-kube-api-access-mx2qh\") pod \"horizon-5d7b46cfb9-nfxv4\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.577669 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5d7b46cfb9-nfxv4"
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.705224 4932 generic.go:334] "Generic (PLEG): container finished" podID="568f6a1f-9f3c-4cda-9f7d-f844a40b4909" containerID="8b472d52078147695a16b60985f261b472d51b058778a2a54298189701fb2c6a" exitCode=143
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.705301 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"568f6a1f-9f3c-4cda-9f7d-f844a40b4909","Type":"ContainerDied","Data":"8b472d52078147695a16b60985f261b472d51b058778a2a54298189701fb2c6a"}
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.737099 4932 generic.go:334] "Generic (PLEG): container finished" podID="f69b7f54-bfa9-45b9-9058-f32978b115aa" containerID="0b68ecf65e763f46d73c10cbcafc18a631c5e413211b52149a8c586b64123046" exitCode=143
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.737156 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f69b7f54-bfa9-45b9-9058-f32978b115aa","Type":"ContainerDied","Data":"0b68ecf65e763f46d73c10cbcafc18a631c5e413211b52149a8c586b64123046"}
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.740424 4932 generic.go:334] "Generic (PLEG): container finished" podID="67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" containerID="33690bd3731b33d91b94de7be67966175ccfb709a1f95ab7381abcb8d69bb9bb" exitCode=0
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.740467 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg9k" event={"ID":"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874","Type":"ContainerDied","Data":"33690bd3731b33d91b94de7be67966175ccfb709a1f95ab7381abcb8d69bb9bb"}
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.740492 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg9k" event={"ID":"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874","Type":"ContainerStarted","Data":"f0ee6c0188f7737ec7649656a7e92a6786fdf95f7fd6a25be0cadc0729cd161b"}
Nov 25 10:32:54 crc kubenswrapper[4932]: I1125 10:32:54.921768 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-84cdfd4f4c-hjhgb"]
Nov 25 10:32:54 crc kubenswrapper[4932]: W1125 10:32:54.924945 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98d7cba9_d28a_4470_bfc1_068233ffe16e.slice/crio-db39702ecf6611db7c8ecb5fc0d1de1333d8524558711ceb87146e7e52781ce7 WatchSource:0}: Error finding container db39702ecf6611db7c8ecb5fc0d1de1333d8524558711ceb87146e7e52781ce7: Status 404 returned error can't find the container with id db39702ecf6611db7c8ecb5fc0d1de1333d8524558711ceb87146e7e52781ce7
Nov 25 10:32:55 crc kubenswrapper[4932]: I1125 10:32:55.119902 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5d7b46cfb9-nfxv4"]
Nov 25 10:32:55 crc kubenswrapper[4932]: I1125 10:32:55.750677 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d7b46cfb9-nfxv4" event={"ID":"472740c4-2af7-463e-b704-391d4e030519","Type":"ContainerStarted","Data":"75d239fd51f587947a0b0446352382eb76ced4627df19444bcf767972604baa2"}
event={"ID":"472740c4-2af7-463e-b704-391d4e030519","Type":"ContainerStarted","Data":"75d239fd51f587947a0b0446352382eb76ced4627df19444bcf767972604baa2"} Nov 25 10:32:55 crc kubenswrapper[4932]: I1125 10:32:55.752750 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg9k" event={"ID":"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874","Type":"ContainerStarted","Data":"8bb055b8151d66a0fc0f405074eb5b1cf48ba0928e8515eeebc1f822aa49e8af"} Nov 25 10:32:55 crc kubenswrapper[4932]: I1125 10:32:55.754000 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84cdfd4f4c-hjhgb" event={"ID":"98d7cba9-d28a-4470-bfc1-068233ffe16e","Type":"ContainerStarted","Data":"db39702ecf6611db7c8ecb5fc0d1de1333d8524558711ceb87146e7e52781ce7"} Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.071219 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5d7b46cfb9-nfxv4"] Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.105246 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-56d8c74498-hgqtb"] Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.107030 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.119061 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.124664 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-56d8c74498-hgqtb"] Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.192406 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nf2wl\" (UniqueName: \"kubernetes.io/projected/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-kube-api-access-nf2wl\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.192463 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-horizon-tls-certs\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.192483 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-config-data\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.192511 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-combined-ca-bundle\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.192530 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-horizon-secret-key\") pod \"horizon-56d8c74498-hgqtb\" (UID: 
\"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.192563 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-scripts\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.192688 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-logs\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.295010 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nf2wl\" (UniqueName: \"kubernetes.io/projected/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-kube-api-access-nf2wl\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.295084 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-horizon-tls-certs\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.295115 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-config-data\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.295158 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-combined-ca-bundle\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.295393 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-horizon-secret-key\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.295465 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-scripts\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.295488 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-logs\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 
10:32:56.296465 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-logs\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.297042 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-config-data\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.297083 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-scripts\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.304564 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-horizon-tls-certs\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.305813 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-84cdfd4f4c-hjhgb"] Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.311852 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-combined-ca-bundle\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.313052 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-horizon-secret-key\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.328624 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nf2wl\" (UniqueName: \"kubernetes.io/projected/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-kube-api-access-nf2wl\") pod \"horizon-56d8c74498-hgqtb\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.364295 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-79cb94f994-d24ks"] Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.366927 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.413967 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-79cb94f994-d24ks"] Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.482040 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.507765 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-scripts\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.507810 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrhjp\" (UniqueName: \"kubernetes.io/projected/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-kube-api-access-zrhjp\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.507873 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-horizon-tls-certs\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.507900 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-logs\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.507952 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-combined-ca-bundle\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.508006 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-horizon-secret-key\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.508055 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-config-data\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.610150 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-scripts\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.610506 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrhjp\" (UniqueName: \"kubernetes.io/projected/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-kube-api-access-zrhjp\") pod \"horizon-79cb94f994-d24ks\" (UID: 
\"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.610551 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-horizon-tls-certs\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.610580 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-logs\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.610607 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-combined-ca-bundle\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.610707 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-horizon-secret-key\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.610742 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-config-data\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.611133 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-scripts\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.618550 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-logs\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.622294 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-config-data\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.631523 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-combined-ca-bundle\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.632611 4932 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-horizon-secret-key\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.633623 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-horizon-tls-certs\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.636613 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrhjp\" (UniqueName: \"kubernetes.io/projected/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-kube-api-access-zrhjp\") pod \"horizon-79cb94f994-d24ks\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.754111 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.781034 4932 generic.go:334] "Generic (PLEG): container finished" podID="67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" containerID="8bb055b8151d66a0fc0f405074eb5b1cf48ba0928e8515eeebc1f822aa49e8af" exitCode=0 Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.781085 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg9k" event={"ID":"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874","Type":"ContainerDied","Data":"8bb055b8151d66a0fc0f405074eb5b1cf48ba0928e8515eeebc1f822aa49e8af"} Nov 25 10:32:56 crc kubenswrapper[4932]: I1125 10:32:56.976947 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-56d8c74498-hgqtb"] Nov 25 10:32:56 crc kubenswrapper[4932]: W1125 10:32:56.978824 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfad4eb9a_01d1_4463_9fe7_d373be4e68c8.slice/crio-76bbe61da922340e7265b9f31d50b0e6de63db7b24160bca6184beef5ed3606f WatchSource:0}: Error finding container 76bbe61da922340e7265b9f31d50b0e6de63db7b24160bca6184beef5ed3606f: Status 404 returned error can't find the container with id 76bbe61da922340e7265b9f31d50b0e6de63db7b24160bca6184beef5ed3606f Nov 25 10:32:57 crc kubenswrapper[4932]: I1125 10:32:57.237218 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-79cb94f994-d24ks"] Nov 25 10:32:57 crc kubenswrapper[4932]: W1125 10:32:57.241274 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0133e4ac_d1bd_455d_9997_4c0d340b9ef7.slice/crio-a7d633b15a4aa20733d9fc611a2519f0f2f5b13281eb3408938bbec94806f45f WatchSource:0}: Error finding container a7d633b15a4aa20733d9fc611a2519f0f2f5b13281eb3408938bbec94806f45f: Status 404 returned error can't find the container with id a7d633b15a4aa20733d9fc611a2519f0f2f5b13281eb3408938bbec94806f45f Nov 25 10:32:57 crc kubenswrapper[4932]: I1125 10:32:57.805743 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56d8c74498-hgqtb" event={"ID":"fad4eb9a-01d1-4463-9fe7-d373be4e68c8","Type":"ContainerStarted","Data":"76bbe61da922340e7265b9f31d50b0e6de63db7b24160bca6184beef5ed3606f"} Nov 25 10:32:57 crc kubenswrapper[4932]: I1125 
10:32:57.817682 4932 generic.go:334] "Generic (PLEG): container finished" podID="f69b7f54-bfa9-45b9-9058-f32978b115aa" containerID="cfcb4d993bb53c8cdf9fbd009c8d7c86a8ff41ee936820a577f8fabcdc2e6601" exitCode=0 Nov 25 10:32:57 crc kubenswrapper[4932]: I1125 10:32:57.817788 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f69b7f54-bfa9-45b9-9058-f32978b115aa","Type":"ContainerDied","Data":"cfcb4d993bb53c8cdf9fbd009c8d7c86a8ff41ee936820a577f8fabcdc2e6601"} Nov 25 10:32:57 crc kubenswrapper[4932]: I1125 10:32:57.822874 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg9k" event={"ID":"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874","Type":"ContainerStarted","Data":"c7762c52a59fc6de7b5ff5bc20c3ccb711d3173b3193640864dc191d78a1c062"} Nov 25 10:32:57 crc kubenswrapper[4932]: I1125 10:32:57.827912 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79cb94f994-d24ks" event={"ID":"0133e4ac-d1bd-455d-9997-4c0d340b9ef7","Type":"ContainerStarted","Data":"a7d633b15a4aa20733d9fc611a2519f0f2f5b13281eb3408938bbec94806f45f"} Nov 25 10:32:57 crc kubenswrapper[4932]: I1125 10:32:57.831509 4932 generic.go:334] "Generic (PLEG): container finished" podID="568f6a1f-9f3c-4cda-9f7d-f844a40b4909" containerID="349c60cc55bbe03b4c6543be441fde234a2d3f06f6bf43b4563a1a30f2766417" exitCode=0 Nov 25 10:32:57 crc kubenswrapper[4932]: I1125 10:32:57.831581 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"568f6a1f-9f3c-4cda-9f7d-f844a40b4909","Type":"ContainerDied","Data":"349c60cc55bbe03b4c6543be441fde234a2d3f06f6bf43b4563a1a30f2766417"} Nov 25 10:32:57 crc kubenswrapper[4932]: I1125 10:32:57.967602 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.004244 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2cg9k" podStartSLOduration=2.565527677 podStartE2EDuration="5.004220852s" podCreationTimestamp="2025-11-25 10:32:53 +0000 UTC" firstStartedPulling="2025-11-25 10:32:54.744486555 +0000 UTC m=+6234.870516128" lastFinishedPulling="2025-11-25 10:32:57.18317974 +0000 UTC m=+6237.309209303" observedRunningTime="2025-11-25 10:32:57.861812967 +0000 UTC m=+6237.987842550" watchObservedRunningTime="2025-11-25 10:32:58.004220852 +0000 UTC m=+6238.130250435" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.048884 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f69b7f54-bfa9-45b9-9058-f32978b115aa-logs\") pod \"f69b7f54-bfa9-45b9-9058-f32978b115aa\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.048966 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f69b7f54-bfa9-45b9-9058-f32978b115aa-httpd-run\") pod \"f69b7f54-bfa9-45b9-9058-f32978b115aa\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.049024 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grj8w\" (UniqueName: \"kubernetes.io/projected/f69b7f54-bfa9-45b9-9058-f32978b115aa-kube-api-access-grj8w\") pod \"f69b7f54-bfa9-45b9-9058-f32978b115aa\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.049122 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-config-data\") pod \"f69b7f54-bfa9-45b9-9058-f32978b115aa\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.049161 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-scripts\") pod \"f69b7f54-bfa9-45b9-9058-f32978b115aa\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.049238 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-combined-ca-bundle\") pod \"f69b7f54-bfa9-45b9-9058-f32978b115aa\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.049268 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-public-tls-certs\") pod \"f69b7f54-bfa9-45b9-9058-f32978b115aa\" (UID: \"f69b7f54-bfa9-45b9-9058-f32978b115aa\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.049686 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f69b7f54-bfa9-45b9-9058-f32978b115aa-logs" (OuterVolumeSpecName: "logs") pod "f69b7f54-bfa9-45b9-9058-f32978b115aa" (UID: "f69b7f54-bfa9-45b9-9058-f32978b115aa"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.050497 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f69b7f54-bfa9-45b9-9058-f32978b115aa-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.052958 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f69b7f54-bfa9-45b9-9058-f32978b115aa-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f69b7f54-bfa9-45b9-9058-f32978b115aa" (UID: "f69b7f54-bfa9-45b9-9058-f32978b115aa"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.064812 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.071946 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-scripts" (OuterVolumeSpecName: "scripts") pod "f69b7f54-bfa9-45b9-9058-f32978b115aa" (UID: "f69b7f54-bfa9-45b9-9058-f32978b115aa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.098011 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f69b7f54-bfa9-45b9-9058-f32978b115aa-kube-api-access-grj8w" (OuterVolumeSpecName: "kube-api-access-grj8w") pod "f69b7f54-bfa9-45b9-9058-f32978b115aa" (UID: "f69b7f54-bfa9-45b9-9058-f32978b115aa"). InnerVolumeSpecName "kube-api-access-grj8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.156754 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-config-data\") pod \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.156922 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-internal-tls-certs\") pod \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.156972 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-scripts\") pod \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.157150 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4lwf\" (UniqueName: \"kubernetes.io/projected/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-kube-api-access-f4lwf\") pod \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.157820 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-logs\") pod \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " Nov 25 
10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.157880 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-combined-ca-bundle\") pod \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.157989 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-httpd-run\") pod \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\" (UID: \"568f6a1f-9f3c-4cda-9f7d-f844a40b4909\") " Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.159628 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-logs" (OuterVolumeSpecName: "logs") pod "568f6a1f-9f3c-4cda-9f7d-f844a40b4909" (UID: "568f6a1f-9f3c-4cda-9f7d-f844a40b4909"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.160786 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "568f6a1f-9f3c-4cda-9f7d-f844a40b4909" (UID: "568f6a1f-9f3c-4cda-9f7d-f844a40b4909"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.161788 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.161828 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.161841 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.161861 4932 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f69b7f54-bfa9-45b9-9058-f32978b115aa-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.161875 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grj8w\" (UniqueName: \"kubernetes.io/projected/f69b7f54-bfa9-45b9-9058-f32978b115aa-kube-api-access-grj8w\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.192459 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-scripts" (OuterVolumeSpecName: "scripts") pod "568f6a1f-9f3c-4cda-9f7d-f844a40b4909" (UID: "568f6a1f-9f3c-4cda-9f7d-f844a40b4909"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.192538 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-kube-api-access-f4lwf" (OuterVolumeSpecName: "kube-api-access-f4lwf") pod "568f6a1f-9f3c-4cda-9f7d-f844a40b4909" (UID: "568f6a1f-9f3c-4cda-9f7d-f844a40b4909"). InnerVolumeSpecName "kube-api-access-f4lwf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.253557 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "568f6a1f-9f3c-4cda-9f7d-f844a40b4909" (UID: "568f6a1f-9f3c-4cda-9f7d-f844a40b4909"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.255739 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-config-data" (OuterVolumeSpecName: "config-data") pod "568f6a1f-9f3c-4cda-9f7d-f844a40b4909" (UID: "568f6a1f-9f3c-4cda-9f7d-f844a40b4909"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.263550 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.263582 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.263591 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.263599 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4lwf\" (UniqueName: \"kubernetes.io/projected/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-kube-api-access-f4lwf\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.264455 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f69b7f54-bfa9-45b9-9058-f32978b115aa" (UID: "f69b7f54-bfa9-45b9-9058-f32978b115aa"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.268982 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f69b7f54-bfa9-45b9-9058-f32978b115aa" (UID: "f69b7f54-bfa9-45b9-9058-f32978b115aa"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.334583 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-config-data" (OuterVolumeSpecName: "config-data") pod "f69b7f54-bfa9-45b9-9058-f32978b115aa" (UID: "f69b7f54-bfa9-45b9-9058-f32978b115aa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.341094 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "568f6a1f-9f3c-4cda-9f7d-f844a40b4909" (UID: "568f6a1f-9f3c-4cda-9f7d-f844a40b4909"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.364923 4932 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/568f6a1f-9f3c-4cda-9f7d-f844a40b4909-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.364958 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.364967 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.364975 4932 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f69b7f54-bfa9-45b9-9058-f32978b115aa-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.858305 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f69b7f54-bfa9-45b9-9058-f32978b115aa","Type":"ContainerDied","Data":"9f5d3c48314bcc7ffafebb2384e3b8c793892bf7dea1e6c418a9ede309f1239e"} Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.858330 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.858368 4932 scope.go:117] "RemoveContainer" containerID="cfcb4d993bb53c8cdf9fbd009c8d7c86a8ff41ee936820a577f8fabcdc2e6601" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.866667 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.866697 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"568f6a1f-9f3c-4cda-9f7d-f844a40b4909","Type":"ContainerDied","Data":"46bcd8b229cd66e9a784c323f770e57c610f768abea9798c94fae3b4061d4ed2"} Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.906663 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.922452 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.947492 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.968777 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:32:58 crc kubenswrapper[4932]: E1125 10:32:58.969514 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="568f6a1f-9f3c-4cda-9f7d-f844a40b4909" containerName="glance-log" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.969530 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="568f6a1f-9f3c-4cda-9f7d-f844a40b4909" containerName="glance-log" Nov 25 10:32:58 crc kubenswrapper[4932]: E1125 10:32:58.969589 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f69b7f54-bfa9-45b9-9058-f32978b115aa" containerName="glance-httpd" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.969598 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f69b7f54-bfa9-45b9-9058-f32978b115aa" containerName="glance-httpd" Nov 25 10:32:58 crc kubenswrapper[4932]: E1125 10:32:58.969612 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f69b7f54-bfa9-45b9-9058-f32978b115aa" containerName="glance-log" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.969620 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f69b7f54-bfa9-45b9-9058-f32978b115aa" containerName="glance-log" Nov 25 10:32:58 crc kubenswrapper[4932]: E1125 10:32:58.969655 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="568f6a1f-9f3c-4cda-9f7d-f844a40b4909" containerName="glance-httpd" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.969662 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="568f6a1f-9f3c-4cda-9f7d-f844a40b4909" containerName="glance-httpd" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.970127 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f69b7f54-bfa9-45b9-9058-f32978b115aa" containerName="glance-httpd" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.970162 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="568f6a1f-9f3c-4cda-9f7d-f844a40b4909" containerName="glance-log" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.970345 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="568f6a1f-9f3c-4cda-9f7d-f844a40b4909" containerName="glance-httpd" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.970373 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f69b7f54-bfa9-45b9-9058-f32978b115aa" containerName="glance-log" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.975230 4932 scope.go:117] "RemoveContainer" 
containerID="0b68ecf65e763f46d73c10cbcafc18a631c5e413211b52149a8c586b64123046" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.976783 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.982781 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.983174 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.983309 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.989446 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-wzvrk" Nov 25 10:32:58 crc kubenswrapper[4932]: I1125 10:32:58.994436 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.025355 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.038997 4932 scope.go:117] "RemoveContainer" containerID="349c60cc55bbe03b4c6543be441fde234a2d3f06f6bf43b4563a1a30f2766417" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.042917 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.056846 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.056940 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.060334 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.060523 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.094853 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b3b18ac-8e61-479a-8b92-a30e3cb86273-config-data\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.094946 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v7rf\" (UniqueName: \"kubernetes.io/projected/4b3b18ac-8e61-479a-8b92-a30e3cb86273-kube-api-access-8v7rf\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.095019 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b3b18ac-8e61-479a-8b92-a30e3cb86273-logs\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.095055 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b3b18ac-8e61-479a-8b92-a30e3cb86273-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.095096 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b3b18ac-8e61-479a-8b92-a30e3cb86273-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.095121 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b3b18ac-8e61-479a-8b92-a30e3cb86273-scripts\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.095140 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b3b18ac-8e61-479a-8b92-a30e3cb86273-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.137106 4932 scope.go:117] "RemoveContainer" containerID="8b472d52078147695a16b60985f261b472d51b058778a2a54298189701fb2c6a" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.198136 4932 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b3b18ac-8e61-479a-8b92-a30e3cb86273-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.198400 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b3b18ac-8e61-479a-8b92-a30e3cb86273-scripts\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.198425 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b3b18ac-8e61-479a-8b92-a30e3cb86273-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.198599 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-257rf\" (UniqueName: \"kubernetes.io/projected/49303621-f303-481c-b79c-fd21f27e53d4-kube-api-access-257rf\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.198746 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49303621-f303-481c-b79c-fd21f27e53d4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.199336 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49303621-f303-481c-b79c-fd21f27e53d4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.200154 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b3b18ac-8e61-479a-8b92-a30e3cb86273-config-data\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.200498 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4b3b18ac-8e61-479a-8b92-a30e3cb86273-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.200630 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8v7rf\" (UniqueName: \"kubernetes.io/projected/4b3b18ac-8e61-479a-8b92-a30e3cb86273-kube-api-access-8v7rf\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.200696 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49303621-f303-481c-b79c-fd21f27e53d4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.200826 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49303621-f303-481c-b79c-fd21f27e53d4-logs\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.200852 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49303621-f303-481c-b79c-fd21f27e53d4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.200890 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b3b18ac-8e61-479a-8b92-a30e3cb86273-logs\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.200961 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49303621-f303-481c-b79c-fd21f27e53d4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.200994 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4b3b18ac-8e61-479a-8b92-a30e3cb86273-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.201532 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4b3b18ac-8e61-479a-8b92-a30e3cb86273-logs\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.203653 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4b3b18ac-8e61-479a-8b92-a30e3cb86273-scripts\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.205492 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4b3b18ac-8e61-479a-8b92-a30e3cb86273-config-data\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.206441 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/4b3b18ac-8e61-479a-8b92-a30e3cb86273-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.226728 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b3b18ac-8e61-479a-8b92-a30e3cb86273-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.232605 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v7rf\" (UniqueName: \"kubernetes.io/projected/4b3b18ac-8e61-479a-8b92-a30e3cb86273-kube-api-access-8v7rf\") pod \"glance-default-external-api-0\" (UID: \"4b3b18ac-8e61-479a-8b92-a30e3cb86273\") " pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.304853 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49303621-f303-481c-b79c-fd21f27e53d4-logs\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.304941 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49303621-f303-481c-b79c-fd21f27e53d4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.304996 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49303621-f303-481c-b79c-fd21f27e53d4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.305352 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/49303621-f303-481c-b79c-fd21f27e53d4-logs\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.305793 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-257rf\" (UniqueName: \"kubernetes.io/projected/49303621-f303-481c-b79c-fd21f27e53d4-kube-api-access-257rf\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.305827 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49303621-f303-481c-b79c-fd21f27e53d4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.306269 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49303621-f303-481c-b79c-fd21f27e53d4-scripts\") pod 
\"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.306408 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/49303621-f303-481c-b79c-fd21f27e53d4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.306478 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49303621-f303-481c-b79c-fd21f27e53d4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.309585 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/49303621-f303-481c-b79c-fd21f27e53d4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.310742 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49303621-f303-481c-b79c-fd21f27e53d4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.311129 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/49303621-f303-481c-b79c-fd21f27e53d4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.313045 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/49303621-f303-481c-b79c-fd21f27e53d4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.324158 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-257rf\" (UniqueName: \"kubernetes.io/projected/49303621-f303-481c-b79c-fd21f27e53d4-kube-api-access-257rf\") pod \"glance-default-internal-api-0\" (UID: \"49303621-f303-481c-b79c-fd21f27e53d4\") " pod="openstack/glance-default-internal-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.330688 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 10:32:59 crc kubenswrapper[4932]: I1125 10:32:59.394336 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 10:33:00 crc kubenswrapper[4932]: I1125 10:33:00.036339 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 10:33:00 crc kubenswrapper[4932]: I1125 10:33:00.120765 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 10:33:00 crc kubenswrapper[4932]: I1125 10:33:00.660846 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="568f6a1f-9f3c-4cda-9f7d-f844a40b4909" path="/var/lib/kubelet/pods/568f6a1f-9f3c-4cda-9f7d-f844a40b4909/volumes" Nov 25 10:33:00 crc kubenswrapper[4932]: I1125 10:33:00.665812 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f69b7f54-bfa9-45b9-9058-f32978b115aa" path="/var/lib/kubelet/pods/f69b7f54-bfa9-45b9-9058-f32978b115aa/volumes" Nov 25 10:33:03 crc kubenswrapper[4932]: I1125 10:33:03.047268 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-1a17-account-create-9qkmp"] Nov 25 10:33:03 crc kubenswrapper[4932]: I1125 10:33:03.062741 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-2shch"] Nov 25 10:33:03 crc kubenswrapper[4932]: I1125 10:33:03.073285 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-1a17-account-create-9qkmp"] Nov 25 10:33:03 crc kubenswrapper[4932]: I1125 10:33:03.083475 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-2shch"] Nov 25 10:33:03 crc kubenswrapper[4932]: I1125 10:33:03.447762 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:33:03 crc kubenswrapper[4932]: I1125 10:33:03.447811 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:33:03 crc kubenswrapper[4932]: I1125 10:33:03.500669 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:33:03 crc kubenswrapper[4932]: I1125 10:33:03.975627 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:33:04 crc kubenswrapper[4932]: I1125 10:33:04.594652 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2cg9k"] Nov 25 10:33:04 crc kubenswrapper[4932]: I1125 10:33:04.619543 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4bb52af-9e09-4ade-a4f8-6cfe27cc0437" path="/var/lib/kubelet/pods/c4bb52af-9e09-4ade-a4f8-6cfe27cc0437/volumes" Nov 25 10:33:04 crc kubenswrapper[4932]: I1125 10:33:04.620558 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e724d116-0845-4e49-b2bd-66b5a62c3ddf" path="/var/lib/kubelet/pods/e724d116-0845-4e49-b2bd-66b5a62c3ddf/volumes" Nov 25 10:33:05 crc kubenswrapper[4932]: W1125 10:33:05.362927 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49303621_f303_481c_b79c_fd21f27e53d4.slice/crio-0763120f47f7d6723b2285c713dcbbba820c56862b3707d71bc5cc87fdc5a0b3 WatchSource:0}: Error finding container 0763120f47f7d6723b2285c713dcbbba820c56862b3707d71bc5cc87fdc5a0b3: Status 404 returned error can't find the container with id 0763120f47f7d6723b2285c713dcbbba820c56862b3707d71bc5cc87fdc5a0b3 Nov 25 10:33:05 crc 
kubenswrapper[4932]: W1125 10:33:05.365437 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b3b18ac_8e61_479a_8b92_a30e3cb86273.slice/crio-df9df9b134ee48bfb7579786a4f681f695c4032e00aa4c9315010c514a5b5216 WatchSource:0}: Error finding container df9df9b134ee48bfb7579786a4f681f695c4032e00aa4c9315010c514a5b5216: Status 404 returned error can't find the container with id df9df9b134ee48bfb7579786a4f681f695c4032e00aa4c9315010c514a5b5216 Nov 25 10:33:05 crc kubenswrapper[4932]: I1125 10:33:05.941640 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4b3b18ac-8e61-479a-8b92-a30e3cb86273","Type":"ContainerStarted","Data":"df9df9b134ee48bfb7579786a4f681f695c4032e00aa4c9315010c514a5b5216"} Nov 25 10:33:05 crc kubenswrapper[4932]: I1125 10:33:05.943560 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"49303621-f303-481c-b79c-fd21f27e53d4","Type":"ContainerStarted","Data":"0763120f47f7d6723b2285c713dcbbba820c56862b3707d71bc5cc87fdc5a0b3"} Nov 25 10:33:05 crc kubenswrapper[4932]: I1125 10:33:05.943730 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2cg9k" podUID="67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" containerName="registry-server" containerID="cri-o://c7762c52a59fc6de7b5ff5bc20c3ccb711d3173b3193640864dc191d78a1c062" gracePeriod=2 Nov 25 10:33:06 crc kubenswrapper[4932]: I1125 10:33:06.955777 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"49303621-f303-481c-b79c-fd21f27e53d4","Type":"ContainerStarted","Data":"21df21bbfb08a4082ee471740510d7444bf3724ef5536f2db5750d04a4d10cb4"} Nov 25 10:33:06 crc kubenswrapper[4932]: I1125 10:33:06.957381 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4b3b18ac-8e61-479a-8b92-a30e3cb86273","Type":"ContainerStarted","Data":"e0c98d6747004dae5e7fd5fb2e059f856ac166b5e9cee1cefb4ecc5d8fbe8afa"} Nov 25 10:33:07 crc kubenswrapper[4932]: I1125 10:33:07.968901 4932 generic.go:334] "Generic (PLEG): container finished" podID="67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" containerID="c7762c52a59fc6de7b5ff5bc20c3ccb711d3173b3193640864dc191d78a1c062" exitCode=0 Nov 25 10:33:07 crc kubenswrapper[4932]: I1125 10:33:07.969011 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg9k" event={"ID":"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874","Type":"ContainerDied","Data":"c7762c52a59fc6de7b5ff5bc20c3ccb711d3173b3193640864dc191d78a1c062"} Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.422960 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.535063 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2ncr\" (UniqueName: \"kubernetes.io/projected/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-kube-api-access-q2ncr\") pod \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\" (UID: \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\") " Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.535413 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-catalog-content\") pod \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\" (UID: \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\") " Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.535673 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-utilities\") pod \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\" (UID: \"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874\") " Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.536832 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-utilities" (OuterVolumeSpecName: "utilities") pod "67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" (UID: "67e42fd3-d1f8-44ff-ad17-96ef7fdd6874"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.542447 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-kube-api-access-q2ncr" (OuterVolumeSpecName: "kube-api-access-q2ncr") pod "67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" (UID: "67e42fd3-d1f8-44ff-ad17-96ef7fdd6874"). InnerVolumeSpecName "kube-api-access-q2ncr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.590529 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" (UID: "67e42fd3-d1f8-44ff-ad17-96ef7fdd6874"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.638904 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.638936 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2ncr\" (UniqueName: \"kubernetes.io/projected/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-kube-api-access-q2ncr\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.638949 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.985365 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79cb94f994-d24ks" event={"ID":"0133e4ac-d1bd-455d-9997-4c0d340b9ef7","Type":"ContainerStarted","Data":"49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0"} Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.988334 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84cdfd4f4c-hjhgb" event={"ID":"98d7cba9-d28a-4470-bfc1-068233ffe16e","Type":"ContainerStarted","Data":"3ad01522d9b4ccd615f24f6f79b52b9f570c834a89e4dd12121d38c2cad8894c"} Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.990265 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56d8c74498-hgqtb" event={"ID":"fad4eb9a-01d1-4463-9fe7-d373be4e68c8","Type":"ContainerStarted","Data":"1b80a20ba98666e6c3435caf7b27ce03fc814da227c028f6fe67973eabd2250f"} Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.992436 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d7b46cfb9-nfxv4" event={"ID":"472740c4-2af7-463e-b704-391d4e030519","Type":"ContainerStarted","Data":"18ca1afc8a0bb197fc5b8218d04e313d980312fd1335ddedf673c64aa5c7d223"} Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.997646 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2cg9k" event={"ID":"67e42fd3-d1f8-44ff-ad17-96ef7fdd6874","Type":"ContainerDied","Data":"f0ee6c0188f7737ec7649656a7e92a6786fdf95f7fd6a25be0cadc0729cd161b"} Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.997707 4932 scope.go:117] "RemoveContainer" containerID="c7762c52a59fc6de7b5ff5bc20c3ccb711d3173b3193640864dc191d78a1c062" Nov 25 10:33:08 crc kubenswrapper[4932]: I1125 10:33:08.997742 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2cg9k" Nov 25 10:33:09 crc kubenswrapper[4932]: I1125 10:33:09.123625 4932 scope.go:117] "RemoveContainer" containerID="8bb055b8151d66a0fc0f405074eb5b1cf48ba0928e8515eeebc1f822aa49e8af" Nov 25 10:33:09 crc kubenswrapper[4932]: I1125 10:33:09.166376 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2cg9k"] Nov 25 10:33:09 crc kubenswrapper[4932]: I1125 10:33:09.175093 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2cg9k"] Nov 25 10:33:09 crc kubenswrapper[4932]: I1125 10:33:09.190946 4932 scope.go:117] "RemoveContainer" containerID="33690bd3731b33d91b94de7be67966175ccfb709a1f95ab7381abcb8d69bb9bb" Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.007516 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4b3b18ac-8e61-479a-8b92-a30e3cb86273","Type":"ContainerStarted","Data":"58d9645ee9c92a876d9a8dd0b0299dd9a0ae7e5e5565b116d76d2c8114fd593d"} Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.010583 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"49303621-f303-481c-b79c-fd21f27e53d4","Type":"ContainerStarted","Data":"bee8c99f0d21382b7aaa533812c299a1d69bf9bc83c8466b2da8ee1da334b124"} Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.012669 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d7b46cfb9-nfxv4" event={"ID":"472740c4-2af7-463e-b704-391d4e030519","Type":"ContainerStarted","Data":"21d444a270946bbf71487001e1f1f1799a87013167974d443e6133d76934c9f9"} Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.012848 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5d7b46cfb9-nfxv4" podUID="472740c4-2af7-463e-b704-391d4e030519" containerName="horizon" containerID="cri-o://21d444a270946bbf71487001e1f1f1799a87013167974d443e6133d76934c9f9" gracePeriod=30 Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.012843 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5d7b46cfb9-nfxv4" podUID="472740c4-2af7-463e-b704-391d4e030519" containerName="horizon-log" containerID="cri-o://18ca1afc8a0bb197fc5b8218d04e313d980312fd1335ddedf673c64aa5c7d223" gracePeriod=30 Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.015345 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79cb94f994-d24ks" event={"ID":"0133e4ac-d1bd-455d-9997-4c0d340b9ef7","Type":"ContainerStarted","Data":"af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a"} Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.017181 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84cdfd4f4c-hjhgb" event={"ID":"98d7cba9-d28a-4470-bfc1-068233ffe16e","Type":"ContainerStarted","Data":"469637add380e137efac31dcdb3ea9f3064e2fc1fd6b7f5dd5d5d0642cc69c06"} Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.017361 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-84cdfd4f4c-hjhgb" podUID="98d7cba9-d28a-4470-bfc1-068233ffe16e" containerName="horizon-log" containerID="cri-o://3ad01522d9b4ccd615f24f6f79b52b9f570c834a89e4dd12121d38c2cad8894c" gracePeriod=30 Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.017435 4932 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/horizon-84cdfd4f4c-hjhgb" podUID="98d7cba9-d28a-4470-bfc1-068233ffe16e" containerName="horizon" containerID="cri-o://469637add380e137efac31dcdb3ea9f3064e2fc1fd6b7f5dd5d5d0642cc69c06" gracePeriod=30 Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.022651 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56d8c74498-hgqtb" event={"ID":"fad4eb9a-01d1-4463-9fe7-d373be4e68c8","Type":"ContainerStarted","Data":"8fe69ccf4df628c561a5d26e535745341eefc2dc956a47224b332e90dc2fcb87"} Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.030917 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=12.030899375 podStartE2EDuration="12.030899375s" podCreationTimestamp="2025-11-25 10:32:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:33:10.027688813 +0000 UTC m=+6250.153718376" watchObservedRunningTime="2025-11-25 10:33:10.030899375 +0000 UTC m=+6250.156928938" Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.060392 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-56d8c74498-hgqtb" podStartSLOduration=2.4711307 podStartE2EDuration="14.060370608s" podCreationTimestamp="2025-11-25 10:32:56 +0000 UTC" firstStartedPulling="2025-11-25 10:32:56.981231632 +0000 UTC m=+6237.107261195" lastFinishedPulling="2025-11-25 10:33:08.57047154 +0000 UTC m=+6248.696501103" observedRunningTime="2025-11-25 10:33:10.052222915 +0000 UTC m=+6250.178252478" watchObservedRunningTime="2025-11-25 10:33:10.060370608 +0000 UTC m=+6250.186400171" Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.109560 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=12.109534155 podStartE2EDuration="12.109534155s" podCreationTimestamp="2025-11-25 10:32:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:33:10.072768753 +0000 UTC m=+6250.198798326" watchObservedRunningTime="2025-11-25 10:33:10.109534155 +0000 UTC m=+6250.235563718" Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.113851 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-79cb94f994-d24ks" podStartSLOduration=2.803730047 podStartE2EDuration="14.113835078s" podCreationTimestamp="2025-11-25 10:32:56 +0000 UTC" firstStartedPulling="2025-11-25 10:32:57.243521047 +0000 UTC m=+6237.369550610" lastFinishedPulling="2025-11-25 10:33:08.553626078 +0000 UTC m=+6248.679655641" observedRunningTime="2025-11-25 10:33:10.111715708 +0000 UTC m=+6250.237745291" watchObservedRunningTime="2025-11-25 10:33:10.113835078 +0000 UTC m=+6250.239864641" Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.142482 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-84cdfd4f4c-hjhgb" podStartSLOduration=2.572328123 podStartE2EDuration="16.142460947s" podCreationTimestamp="2025-11-25 10:32:54 +0000 UTC" firstStartedPulling="2025-11-25 10:32:54.928098579 +0000 UTC m=+6235.054128142" lastFinishedPulling="2025-11-25 10:33:08.498231393 +0000 UTC m=+6248.624260966" observedRunningTime="2025-11-25 10:33:10.129674551 +0000 UTC m=+6250.255704114" watchObservedRunningTime="2025-11-25 10:33:10.142460947 +0000 UTC m=+6250.268490510" Nov 25 10:33:10 
crc kubenswrapper[4932]: I1125 10:33:10.151024 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5d7b46cfb9-nfxv4" podStartSLOduration=2.594442666 podStartE2EDuration="16.151008532s" podCreationTimestamp="2025-11-25 10:32:54 +0000 UTC" firstStartedPulling="2025-11-25 10:32:55.126813044 +0000 UTC m=+6235.252842607" lastFinishedPulling="2025-11-25 10:33:08.68337891 +0000 UTC m=+6248.809408473" observedRunningTime="2025-11-25 10:33:10.14848071 +0000 UTC m=+6250.274510263" watchObservedRunningTime="2025-11-25 10:33:10.151008532 +0000 UTC m=+6250.277038085" Nov 25 10:33:10 crc kubenswrapper[4932]: I1125 10:33:10.627012 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" path="/var/lib/kubelet/pods/67e42fd3-d1f8-44ff-ad17-96ef7fdd6874/volumes" Nov 25 10:33:11 crc kubenswrapper[4932]: I1125 10:33:11.050091 4932 scope.go:117] "RemoveContainer" containerID="b4fb770e7260d9edc09bde16771161de58218ed6224fbd67095b20946055b9f7" Nov 25 10:33:11 crc kubenswrapper[4932]: I1125 10:33:11.084919 4932 scope.go:117] "RemoveContainer" containerID="b5d5627eee4ab8dfb81479b2558194c26b712b5720f96f118b35730869ce76c0" Nov 25 10:33:11 crc kubenswrapper[4932]: I1125 10:33:11.158120 4932 scope.go:117] "RemoveContainer" containerID="55cf75288f2c2f60f78d5895a70e836a0e5cf8189a31b224d621844e141bab94" Nov 25 10:33:11 crc kubenswrapper[4932]: I1125 10:33:11.232997 4932 scope.go:117] "RemoveContainer" containerID="392cdbd783296c21c94b00955fe1c6cca5fd492a37699af647ab31baa5e264cc" Nov 25 10:33:11 crc kubenswrapper[4932]: I1125 10:33:11.274601 4932 scope.go:117] "RemoveContainer" containerID="a292be5c7abe174ef90e235e0074793677aad99f5dffc55489836b2a599a2e72" Nov 25 10:33:12 crc kubenswrapper[4932]: I1125 10:33:12.033361 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-7chrd"] Nov 25 10:33:12 crc kubenswrapper[4932]: I1125 10:33:12.044233 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-7chrd"] Nov 25 10:33:12 crc kubenswrapper[4932]: I1125 10:33:12.618763 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e9c29f9-2e9f-499c-8902-4f500bc57328" path="/var/lib/kubelet/pods/6e9c29f9-2e9f-499c-8902-4f500bc57328/volumes" Nov 25 10:33:14 crc kubenswrapper[4932]: I1125 10:33:14.432049 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-84cdfd4f4c-hjhgb" Nov 25 10:33:14 crc kubenswrapper[4932]: I1125 10:33:14.578014 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5d7b46cfb9-nfxv4" Nov 25 10:33:16 crc kubenswrapper[4932]: I1125 10:33:16.482126 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:33:16 crc kubenswrapper[4932]: I1125 10:33:16.482499 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:33:16 crc kubenswrapper[4932]: I1125 10:33:16.756067 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:33:16 crc kubenswrapper[4932]: I1125 10:33:16.756129 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:33:19 crc kubenswrapper[4932]: I1125 10:33:19.331673 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 
25 10:33:19 crc kubenswrapper[4932]: I1125 10:33:19.332410 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 10:33:19 crc kubenswrapper[4932]: I1125 10:33:19.365245 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 10:33:19 crc kubenswrapper[4932]: I1125 10:33:19.380375 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 10:33:19 crc kubenswrapper[4932]: I1125 10:33:19.394884 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 10:33:19 crc kubenswrapper[4932]: I1125 10:33:19.396633 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 10:33:19 crc kubenswrapper[4932]: I1125 10:33:19.433708 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 10:33:19 crc kubenswrapper[4932]: I1125 10:33:19.464519 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 10:33:20 crc kubenswrapper[4932]: I1125 10:33:20.136407 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 10:33:20 crc kubenswrapper[4932]: I1125 10:33:20.136711 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 10:33:20 crc kubenswrapper[4932]: I1125 10:33:20.136724 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 10:33:20 crc kubenswrapper[4932]: I1125 10:33:20.136733 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 10:33:23 crc kubenswrapper[4932]: I1125 10:33:23.842209 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 10:33:23 crc kubenswrapper[4932]: I1125 10:33:23.842717 4932 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:33:23 crc kubenswrapper[4932]: I1125 10:33:23.843101 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 10:33:23 crc kubenswrapper[4932]: I1125 10:33:23.852401 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 10:33:23 crc kubenswrapper[4932]: I1125 10:33:23.852521 4932 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 10:33:23 crc kubenswrapper[4932]: I1125 10:33:23.856916 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 10:33:26 crc kubenswrapper[4932]: I1125 10:33:26.483295 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-56d8c74498-hgqtb" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.127:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.127:8443: connect: connection refused" Nov 25 10:33:26 crc kubenswrapper[4932]: I1125 10:33:26.757357 4932 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/horizon-79cb94f994-d24ks" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.128:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.128:8443: connect: connection refused" Nov 25 10:33:38 crc kubenswrapper[4932]: I1125 10:33:38.573252 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:33:39 crc kubenswrapper[4932]: I1125 10:33:39.029359 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.326782 4932 generic.go:334] "Generic (PLEG): container finished" podID="98d7cba9-d28a-4470-bfc1-068233ffe16e" containerID="469637add380e137efac31dcdb3ea9f3064e2fc1fd6b7f5dd5d5d0642cc69c06" exitCode=137 Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.327312 4932 generic.go:334] "Generic (PLEG): container finished" podID="98d7cba9-d28a-4470-bfc1-068233ffe16e" containerID="3ad01522d9b4ccd615f24f6f79b52b9f570c834a89e4dd12121d38c2cad8894c" exitCode=137 Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.327361 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84cdfd4f4c-hjhgb" event={"ID":"98d7cba9-d28a-4470-bfc1-068233ffe16e","Type":"ContainerDied","Data":"469637add380e137efac31dcdb3ea9f3064e2fc1fd6b7f5dd5d5d0642cc69c06"} Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.327391 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84cdfd4f4c-hjhgb" event={"ID":"98d7cba9-d28a-4470-bfc1-068233ffe16e","Type":"ContainerDied","Data":"3ad01522d9b4ccd615f24f6f79b52b9f570c834a89e4dd12121d38c2cad8894c"} Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.329431 4932 generic.go:334] "Generic (PLEG): container finished" podID="472740c4-2af7-463e-b704-391d4e030519" containerID="21d444a270946bbf71487001e1f1f1799a87013167974d443e6133d76934c9f9" exitCode=137 Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.329452 4932 generic.go:334] "Generic (PLEG): container finished" podID="472740c4-2af7-463e-b704-391d4e030519" containerID="18ca1afc8a0bb197fc5b8218d04e313d980312fd1335ddedf673c64aa5c7d223" exitCode=137 Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.329468 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d7b46cfb9-nfxv4" event={"ID":"472740c4-2af7-463e-b704-391d4e030519","Type":"ContainerDied","Data":"21d444a270946bbf71487001e1f1f1799a87013167974d443e6133d76934c9f9"} Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.329486 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d7b46cfb9-nfxv4" event={"ID":"472740c4-2af7-463e-b704-391d4e030519","Type":"ContainerDied","Data":"18ca1afc8a0bb197fc5b8218d04e313d980312fd1335ddedf673c64aa5c7d223"} Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.468969 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.571563 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-84cdfd4f4c-hjhgb" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.701414 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/98d7cba9-d28a-4470-bfc1-068233ffe16e-horizon-secret-key\") pod \"98d7cba9-d28a-4470-bfc1-068233ffe16e\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.701482 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/98d7cba9-d28a-4470-bfc1-068233ffe16e-config-data\") pod \"98d7cba9-d28a-4470-bfc1-068233ffe16e\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.701629 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7l9ss\" (UniqueName: \"kubernetes.io/projected/98d7cba9-d28a-4470-bfc1-068233ffe16e-kube-api-access-7l9ss\") pod \"98d7cba9-d28a-4470-bfc1-068233ffe16e\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.701665 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/98d7cba9-d28a-4470-bfc1-068233ffe16e-scripts\") pod \"98d7cba9-d28a-4470-bfc1-068233ffe16e\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.701847 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98d7cba9-d28a-4470-bfc1-068233ffe16e-logs\") pod \"98d7cba9-d28a-4470-bfc1-068233ffe16e\" (UID: \"98d7cba9-d28a-4470-bfc1-068233ffe16e\") " Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.705232 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98d7cba9-d28a-4470-bfc1-068233ffe16e-logs" (OuterVolumeSpecName: "logs") pod "98d7cba9-d28a-4470-bfc1-068233ffe16e" (UID: "98d7cba9-d28a-4470-bfc1-068233ffe16e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.708109 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98d7cba9-d28a-4470-bfc1-068233ffe16e-kube-api-access-7l9ss" (OuterVolumeSpecName: "kube-api-access-7l9ss") pod "98d7cba9-d28a-4470-bfc1-068233ffe16e" (UID: "98d7cba9-d28a-4470-bfc1-068233ffe16e"). InnerVolumeSpecName "kube-api-access-7l9ss". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.708787 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/98d7cba9-d28a-4470-bfc1-068233ffe16e-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.708812 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7l9ss\" (UniqueName: \"kubernetes.io/projected/98d7cba9-d28a-4470-bfc1-068233ffe16e-kube-api-access-7l9ss\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.709521 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98d7cba9-d28a-4470-bfc1-068233ffe16e-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "98d7cba9-d28a-4470-bfc1-068233ffe16e" (UID: "98d7cba9-d28a-4470-bfc1-068233ffe16e"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.738464 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98d7cba9-d28a-4470-bfc1-068233ffe16e-scripts" (OuterVolumeSpecName: "scripts") pod "98d7cba9-d28a-4470-bfc1-068233ffe16e" (UID: "98d7cba9-d28a-4470-bfc1-068233ffe16e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.742928 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98d7cba9-d28a-4470-bfc1-068233ffe16e-config-data" (OuterVolumeSpecName: "config-data") pod "98d7cba9-d28a-4470-bfc1-068233ffe16e" (UID: "98d7cba9-d28a-4470-bfc1-068233ffe16e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.811851 4932 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/98d7cba9-d28a-4470-bfc1-068233ffe16e-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.811893 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/98d7cba9-d28a-4470-bfc1-068233ffe16e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.811904 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/98d7cba9-d28a-4470-bfc1-068233ffe16e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:40 crc kubenswrapper[4932]: I1125 10:33:40.964416 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5d7b46cfb9-nfxv4" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.000657 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.072457 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-56d8c74498-hgqtb"] Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.116920 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/472740c4-2af7-463e-b704-391d4e030519-config-data\") pod \"472740c4-2af7-463e-b704-391d4e030519\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.116991 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/472740c4-2af7-463e-b704-391d4e030519-logs\") pod \"472740c4-2af7-463e-b704-391d4e030519\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.117104 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mx2qh\" (UniqueName: \"kubernetes.io/projected/472740c4-2af7-463e-b704-391d4e030519-kube-api-access-mx2qh\") pod \"472740c4-2af7-463e-b704-391d4e030519\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.117136 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/472740c4-2af7-463e-b704-391d4e030519-scripts\") pod \"472740c4-2af7-463e-b704-391d4e030519\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.117186 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/472740c4-2af7-463e-b704-391d4e030519-horizon-secret-key\") pod \"472740c4-2af7-463e-b704-391d4e030519\" (UID: \"472740c4-2af7-463e-b704-391d4e030519\") " Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.117989 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/472740c4-2af7-463e-b704-391d4e030519-logs" (OuterVolumeSpecName: "logs") pod "472740c4-2af7-463e-b704-391d4e030519" (UID: "472740c4-2af7-463e-b704-391d4e030519"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.129941 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/472740c4-2af7-463e-b704-391d4e030519-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "472740c4-2af7-463e-b704-391d4e030519" (UID: "472740c4-2af7-463e-b704-391d4e030519"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.130911 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/472740c4-2af7-463e-b704-391d4e030519-kube-api-access-mx2qh" (OuterVolumeSpecName: "kube-api-access-mx2qh") pod "472740c4-2af7-463e-b704-391d4e030519" (UID: "472740c4-2af7-463e-b704-391d4e030519"). InnerVolumeSpecName "kube-api-access-mx2qh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.148476 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/472740c4-2af7-463e-b704-391d4e030519-config-data" (OuterVolumeSpecName: "config-data") pod "472740c4-2af7-463e-b704-391d4e030519" (UID: "472740c4-2af7-463e-b704-391d4e030519"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.151016 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/472740c4-2af7-463e-b704-391d4e030519-scripts" (OuterVolumeSpecName: "scripts") pod "472740c4-2af7-463e-b704-391d4e030519" (UID: "472740c4-2af7-463e-b704-391d4e030519"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.220911 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/472740c4-2af7-463e-b704-391d4e030519-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.222578 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/472740c4-2af7-463e-b704-391d4e030519-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.222601 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mx2qh\" (UniqueName: \"kubernetes.io/projected/472740c4-2af7-463e-b704-391d4e030519-kube-api-access-mx2qh\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.222614 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/472740c4-2af7-463e-b704-391d4e030519-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.222627 4932 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/472740c4-2af7-463e-b704-391d4e030519-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.342415 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d7b46cfb9-nfxv4" event={"ID":"472740c4-2af7-463e-b704-391d4e030519","Type":"ContainerDied","Data":"75d239fd51f587947a0b0446352382eb76ced4627df19444bcf767972604baa2"} Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.342465 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5d7b46cfb9-nfxv4" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.342482 4932 scope.go:117] "RemoveContainer" containerID="21d444a270946bbf71487001e1f1f1799a87013167974d443e6133d76934c9f9" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.346378 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84cdfd4f4c-hjhgb" event={"ID":"98d7cba9-d28a-4470-bfc1-068233ffe16e","Type":"ContainerDied","Data":"db39702ecf6611db7c8ecb5fc0d1de1333d8524558711ceb87146e7e52781ce7"} Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.346409 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-56d8c74498-hgqtb" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerName="horizon-log" containerID="cri-o://1b80a20ba98666e6c3435caf7b27ce03fc814da227c028f6fe67973eabd2250f" gracePeriod=30 Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.346452 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-56d8c74498-hgqtb" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerName="horizon" containerID="cri-o://8fe69ccf4df628c561a5d26e535745341eefc2dc956a47224b332e90dc2fcb87" gracePeriod=30 Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.346642 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84cdfd4f4c-hjhgb" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.388286 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5d7b46cfb9-nfxv4"] Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.399043 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5d7b46cfb9-nfxv4"] Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.409412 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-84cdfd4f4c-hjhgb"] Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.419240 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-84cdfd4f4c-hjhgb"] Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.531705 4932 scope.go:117] "RemoveContainer" containerID="18ca1afc8a0bb197fc5b8218d04e313d980312fd1335ddedf673c64aa5c7d223" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.552057 4932 scope.go:117] "RemoveContainer" containerID="469637add380e137efac31dcdb3ea9f3064e2fc1fd6b7f5dd5d5d0642cc69c06" Nov 25 10:33:41 crc kubenswrapper[4932]: I1125 10:33:41.749144 4932 scope.go:117] "RemoveContainer" containerID="3ad01522d9b4ccd615f24f6f79b52b9f570c834a89e4dd12121d38c2cad8894c" Nov 25 10:33:42 crc kubenswrapper[4932]: I1125 10:33:42.617954 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="472740c4-2af7-463e-b704-391d4e030519" path="/var/lib/kubelet/pods/472740c4-2af7-463e-b704-391d4e030519/volumes" Nov 25 10:33:42 crc kubenswrapper[4932]: I1125 10:33:42.618732 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98d7cba9-d28a-4470-bfc1-068233ffe16e" path="/var/lib/kubelet/pods/98d7cba9-d28a-4470-bfc1-068233ffe16e/volumes" Nov 25 10:33:45 crc kubenswrapper[4932]: I1125 10:33:45.407471 4932 generic.go:334] "Generic (PLEG): container finished" podID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerID="8fe69ccf4df628c561a5d26e535745341eefc2dc956a47224b332e90dc2fcb87" exitCode=0 Nov 25 10:33:45 crc kubenswrapper[4932]: I1125 10:33:45.407625 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56d8c74498-hgqtb" 
event={"ID":"fad4eb9a-01d1-4463-9fe7-d373be4e68c8","Type":"ContainerDied","Data":"8fe69ccf4df628c561a5d26e535745341eefc2dc956a47224b332e90dc2fcb87"} Nov 25 10:33:46 crc kubenswrapper[4932]: I1125 10:33:46.482423 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-56d8c74498-hgqtb" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.127:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.127:8443: connect: connection refused" Nov 25 10:33:56 crc kubenswrapper[4932]: I1125 10:33:56.482546 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-56d8c74498-hgqtb" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.127:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.127:8443: connect: connection refused" Nov 25 10:34:06 crc kubenswrapper[4932]: I1125 10:34:06.482655 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-56d8c74498-hgqtb" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.127:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.127:8443: connect: connection refused" Nov 25 10:34:06 crc kubenswrapper[4932]: I1125 10:34:06.484472 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.586236 4932 scope.go:117] "RemoveContainer" containerID="685337404dc9c6f4bc92b51f6a82ba5beed1ca22df709eed9986a60af8edbaf6" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.653086 4932 scope.go:117] "RemoveContainer" containerID="bf103e93edc2bf9d2e8e1541bf3f59b3bcc9454b1c86186111c93bb179633a49" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.656371 4932 generic.go:334] "Generic (PLEG): container finished" podID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerID="1b80a20ba98666e6c3435caf7b27ce03fc814da227c028f6fe67973eabd2250f" exitCode=137 Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.656418 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56d8c74498-hgqtb" event={"ID":"fad4eb9a-01d1-4463-9fe7-d373be4e68c8","Type":"ContainerDied","Data":"1b80a20ba98666e6c3435caf7b27ce03fc814da227c028f6fe67973eabd2250f"} Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.704583 4932 scope.go:117] "RemoveContainer" containerID="3023e7835d395628606ba55336c1a0d6272dbe01c002094e67b5bc6ceb74ada5" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.776930 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.859983 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-logs\") pod \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.860128 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-combined-ca-bundle\") pod \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.860158 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-config-data\") pod \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.860337 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-scripts\") pod \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.860396 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-horizon-tls-certs\") pod \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.860425 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nf2wl\" (UniqueName: \"kubernetes.io/projected/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-kube-api-access-nf2wl\") pod \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.860459 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-horizon-secret-key\") pod \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\" (UID: \"fad4eb9a-01d1-4463-9fe7-d373be4e68c8\") " Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.860708 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-logs" (OuterVolumeSpecName: "logs") pod "fad4eb9a-01d1-4463-9fe7-d373be4e68c8" (UID: "fad4eb9a-01d1-4463-9fe7-d373be4e68c8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.860959 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.868261 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "fad4eb9a-01d1-4463-9fe7-d373be4e68c8" (UID: "fad4eb9a-01d1-4463-9fe7-d373be4e68c8"). 
InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.868336 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-kube-api-access-nf2wl" (OuterVolumeSpecName: "kube-api-access-nf2wl") pod "fad4eb9a-01d1-4463-9fe7-d373be4e68c8" (UID: "fad4eb9a-01d1-4463-9fe7-d373be4e68c8"). InnerVolumeSpecName "kube-api-access-nf2wl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.889132 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-scripts" (OuterVolumeSpecName: "scripts") pod "fad4eb9a-01d1-4463-9fe7-d373be4e68c8" (UID: "fad4eb9a-01d1-4463-9fe7-d373be4e68c8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.894517 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-config-data" (OuterVolumeSpecName: "config-data") pod "fad4eb9a-01d1-4463-9fe7-d373be4e68c8" (UID: "fad4eb9a-01d1-4463-9fe7-d373be4e68c8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.900082 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fad4eb9a-01d1-4463-9fe7-d373be4e68c8" (UID: "fad4eb9a-01d1-4463-9fe7-d373be4e68c8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.925365 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "fad4eb9a-01d1-4463-9fe7-d373be4e68c8" (UID: "fad4eb9a-01d1-4463-9fe7-d373be4e68c8"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.962642 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.962682 4932 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.962694 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nf2wl\" (UniqueName: \"kubernetes.io/projected/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-kube-api-access-nf2wl\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.962703 4932 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.962713 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:11 crc kubenswrapper[4932]: I1125 10:34:11.962721 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fad4eb9a-01d1-4463-9fe7-d373be4e68c8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:12 crc kubenswrapper[4932]: I1125 10:34:12.667311 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-56d8c74498-hgqtb" event={"ID":"fad4eb9a-01d1-4463-9fe7-d373be4e68c8","Type":"ContainerDied","Data":"76bbe61da922340e7265b9f31d50b0e6de63db7b24160bca6184beef5ed3606f"} Nov 25 10:34:12 crc kubenswrapper[4932]: I1125 10:34:12.667382 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-56d8c74498-hgqtb" Nov 25 10:34:12 crc kubenswrapper[4932]: I1125 10:34:12.667689 4932 scope.go:117] "RemoveContainer" containerID="8fe69ccf4df628c561a5d26e535745341eefc2dc956a47224b332e90dc2fcb87" Nov 25 10:34:12 crc kubenswrapper[4932]: I1125 10:34:12.696121 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-56d8c74498-hgqtb"] Nov 25 10:34:12 crc kubenswrapper[4932]: I1125 10:34:12.705754 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-56d8c74498-hgqtb"] Nov 25 10:34:12 crc kubenswrapper[4932]: I1125 10:34:12.841323 4932 scope.go:117] "RemoveContainer" containerID="1b80a20ba98666e6c3435caf7b27ce03fc814da227c028f6fe67973eabd2250f" Nov 25 10:34:14 crc kubenswrapper[4932]: I1125 10:34:14.617525 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" path="/var/lib/kubelet/pods/fad4eb9a-01d1-4463-9fe7-d373be4e68c8/volumes" Nov 25 10:34:35 crc kubenswrapper[4932]: I1125 10:34:35.043458 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-zs9cx"] Nov 25 10:34:35 crc kubenswrapper[4932]: I1125 10:34:35.053778 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-2beb-account-create-f77ql"] Nov 25 10:34:35 crc kubenswrapper[4932]: I1125 10:34:35.062584 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-2beb-account-create-f77ql"] Nov 25 10:34:35 crc kubenswrapper[4932]: I1125 10:34:35.070416 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-zs9cx"] Nov 25 10:34:36 crc kubenswrapper[4932]: I1125 10:34:36.618013 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="365361a1-846d-43f6-9423-e09278bb603b" path="/var/lib/kubelet/pods/365361a1-846d-43f6-9423-e09278bb603b/volumes" Nov 25 10:34:36 crc kubenswrapper[4932]: I1125 10:34:36.618992 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8bc4a14-5db6-46c1-88e9-448799276e2e" path="/var/lib/kubelet/pods/e8bc4a14-5db6-46c1-88e9-448799276e2e/volumes" Nov 25 10:34:37 crc kubenswrapper[4932]: I1125 10:34:37.181544 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:34:37 crc kubenswrapper[4932]: I1125 10:34:37.181615 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:34:44 crc kubenswrapper[4932]: I1125 10:34:44.027503 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-mwtbt"] Nov 25 10:34:44 crc kubenswrapper[4932]: I1125 10:34:44.038902 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-mwtbt"] Nov 25 10:34:44 crc kubenswrapper[4932]: I1125 10:34:44.616821 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5deeb987-1aff-4645-9018-492f7517dcc6" path="/var/lib/kubelet/pods/5deeb987-1aff-4645-9018-492f7517dcc6/volumes" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.781834 4932 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/horizon-5d9c956f9d-95gps"] Nov 25 10:34:46 crc kubenswrapper[4932]: E1125 10:34:46.782922 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98d7cba9-d28a-4470-bfc1-068233ffe16e" containerName="horizon-log" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.782942 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="98d7cba9-d28a-4470-bfc1-068233ffe16e" containerName="horizon-log" Nov 25 10:34:46 crc kubenswrapper[4932]: E1125 10:34:46.782952 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerName="horizon-log" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.782959 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerName="horizon-log" Nov 25 10:34:46 crc kubenswrapper[4932]: E1125 10:34:46.782972 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerName="horizon" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.782981 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerName="horizon" Nov 25 10:34:46 crc kubenswrapper[4932]: E1125 10:34:46.782996 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" containerName="extract-content" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783003 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" containerName="extract-content" Nov 25 10:34:46 crc kubenswrapper[4932]: E1125 10:34:46.783041 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" containerName="registry-server" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783050 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" containerName="registry-server" Nov 25 10:34:46 crc kubenswrapper[4932]: E1125 10:34:46.783074 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" containerName="extract-utilities" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783081 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" containerName="extract-utilities" Nov 25 10:34:46 crc kubenswrapper[4932]: E1125 10:34:46.783092 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="472740c4-2af7-463e-b704-391d4e030519" containerName="horizon-log" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783101 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="472740c4-2af7-463e-b704-391d4e030519" containerName="horizon-log" Nov 25 10:34:46 crc kubenswrapper[4932]: E1125 10:34:46.783110 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="472740c4-2af7-463e-b704-391d4e030519" containerName="horizon" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783116 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="472740c4-2af7-463e-b704-391d4e030519" containerName="horizon" Nov 25 10:34:46 crc kubenswrapper[4932]: E1125 10:34:46.783133 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98d7cba9-d28a-4470-bfc1-068233ffe16e" containerName="horizon" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783142 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="98d7cba9-d28a-4470-bfc1-068233ffe16e" 
containerName="horizon" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783403 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerName="horizon-log" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783422 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="472740c4-2af7-463e-b704-391d4e030519" containerName="horizon" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783437 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="98d7cba9-d28a-4470-bfc1-068233ffe16e" containerName="horizon" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783453 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="472740c4-2af7-463e-b704-391d4e030519" containerName="horizon-log" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783465 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="fad4eb9a-01d1-4463-9fe7-d373be4e68c8" containerName="horizon" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783473 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="67e42fd3-d1f8-44ff-ad17-96ef7fdd6874" containerName="registry-server" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.783488 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="98d7cba9-d28a-4470-bfc1-068233ffe16e" containerName="horizon-log" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.784698 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.800803 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5d9c956f9d-95gps"] Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.876900 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/771edf61-8fc3-443b-855c-848aa101293f-combined-ca-bundle\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.876964 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/771edf61-8fc3-443b-855c-848aa101293f-config-data\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.877062 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/771edf61-8fc3-443b-855c-848aa101293f-logs\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.877098 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/771edf61-8fc3-443b-855c-848aa101293f-horizon-tls-certs\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.877147 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/771edf61-8fc3-443b-855c-848aa101293f-scripts\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.877174 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2zzx\" (UniqueName: \"kubernetes.io/projected/771edf61-8fc3-443b-855c-848aa101293f-kube-api-access-q2zzx\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.877211 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/771edf61-8fc3-443b-855c-848aa101293f-horizon-secret-key\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.978572 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2zzx\" (UniqueName: \"kubernetes.io/projected/771edf61-8fc3-443b-855c-848aa101293f-kube-api-access-q2zzx\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.978614 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/771edf61-8fc3-443b-855c-848aa101293f-horizon-secret-key\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.978665 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/771edf61-8fc3-443b-855c-848aa101293f-combined-ca-bundle\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.978697 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/771edf61-8fc3-443b-855c-848aa101293f-config-data\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.978790 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/771edf61-8fc3-443b-855c-848aa101293f-logs\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.978869 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/771edf61-8fc3-443b-855c-848aa101293f-horizon-tls-certs\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.978962 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/771edf61-8fc3-443b-855c-848aa101293f-scripts\") pod 
\"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.979417 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/771edf61-8fc3-443b-855c-848aa101293f-logs\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.980245 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/771edf61-8fc3-443b-855c-848aa101293f-config-data\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.981768 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/771edf61-8fc3-443b-855c-848aa101293f-scripts\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.985144 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/771edf61-8fc3-443b-855c-848aa101293f-horizon-tls-certs\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.985186 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/771edf61-8fc3-443b-855c-848aa101293f-horizon-secret-key\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.985499 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/771edf61-8fc3-443b-855c-848aa101293f-combined-ca-bundle\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:46 crc kubenswrapper[4932]: I1125 10:34:46.994382 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2zzx\" (UniqueName: \"kubernetes.io/projected/771edf61-8fc3-443b-855c-848aa101293f-kube-api-access-q2zzx\") pod \"horizon-5d9c956f9d-95gps\" (UID: \"771edf61-8fc3-443b-855c-848aa101293f\") " pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:47 crc kubenswrapper[4932]: I1125 10:34:47.112601 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:47 crc kubenswrapper[4932]: I1125 10:34:47.602168 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5d9c956f9d-95gps"] Nov 25 10:34:47 crc kubenswrapper[4932]: I1125 10:34:47.995735 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d9c956f9d-95gps" event={"ID":"771edf61-8fc3-443b-855c-848aa101293f","Type":"ContainerStarted","Data":"3438aa40ba12fe6141a61635a935531159ad38f8d3f20d30f6e870fa65617800"} Nov 25 10:34:47 crc kubenswrapper[4932]: I1125 10:34:47.996060 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d9c956f9d-95gps" event={"ID":"771edf61-8fc3-443b-855c-848aa101293f","Type":"ContainerStarted","Data":"07a275792d9ea7b802ad56d2d706610ebea44fbce9db1186b924f246b903d24f"} Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.089980 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-wlqvh"] Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.091814 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-wlqvh" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.110797 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-wlqvh"] Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.173943 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-0352-account-create-tcrpp"] Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.176508 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-0352-account-create-tcrpp" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.179662 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.186626 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-0352-account-create-tcrpp"] Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.207920 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf65w\" (UniqueName: \"kubernetes.io/projected/65dc564e-efa5-4236-bcb5-fb93b7b22df6-kube-api-access-gf65w\") pod \"heat-db-create-wlqvh\" (UID: \"65dc564e-efa5-4236-bcb5-fb93b7b22df6\") " pod="openstack/heat-db-create-wlqvh" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.208038 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65dc564e-efa5-4236-bcb5-fb93b7b22df6-operator-scripts\") pod \"heat-db-create-wlqvh\" (UID: \"65dc564e-efa5-4236-bcb5-fb93b7b22df6\") " pod="openstack/heat-db-create-wlqvh" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.309727 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65dc564e-efa5-4236-bcb5-fb93b7b22df6-operator-scripts\") pod \"heat-db-create-wlqvh\" (UID: \"65dc564e-efa5-4236-bcb5-fb93b7b22df6\") " pod="openstack/heat-db-create-wlqvh" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.309802 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ln9lp\" (UniqueName: \"kubernetes.io/projected/f7834cb6-728c-43bb-8de6-47dd0ed632ca-kube-api-access-ln9lp\") pod \"heat-0352-account-create-tcrpp\" (UID: 
\"f7834cb6-728c-43bb-8de6-47dd0ed632ca\") " pod="openstack/heat-0352-account-create-tcrpp" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.309907 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7834cb6-728c-43bb-8de6-47dd0ed632ca-operator-scripts\") pod \"heat-0352-account-create-tcrpp\" (UID: \"f7834cb6-728c-43bb-8de6-47dd0ed632ca\") " pod="openstack/heat-0352-account-create-tcrpp" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.310021 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf65w\" (UniqueName: \"kubernetes.io/projected/65dc564e-efa5-4236-bcb5-fb93b7b22df6-kube-api-access-gf65w\") pod \"heat-db-create-wlqvh\" (UID: \"65dc564e-efa5-4236-bcb5-fb93b7b22df6\") " pod="openstack/heat-db-create-wlqvh" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.311033 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65dc564e-efa5-4236-bcb5-fb93b7b22df6-operator-scripts\") pod \"heat-db-create-wlqvh\" (UID: \"65dc564e-efa5-4236-bcb5-fb93b7b22df6\") " pod="openstack/heat-db-create-wlqvh" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.329744 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf65w\" (UniqueName: \"kubernetes.io/projected/65dc564e-efa5-4236-bcb5-fb93b7b22df6-kube-api-access-gf65w\") pod \"heat-db-create-wlqvh\" (UID: \"65dc564e-efa5-4236-bcb5-fb93b7b22df6\") " pod="openstack/heat-db-create-wlqvh" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.412113 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7834cb6-728c-43bb-8de6-47dd0ed632ca-operator-scripts\") pod \"heat-0352-account-create-tcrpp\" (UID: \"f7834cb6-728c-43bb-8de6-47dd0ed632ca\") " pod="openstack/heat-0352-account-create-tcrpp" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.412362 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ln9lp\" (UniqueName: \"kubernetes.io/projected/f7834cb6-728c-43bb-8de6-47dd0ed632ca-kube-api-access-ln9lp\") pod \"heat-0352-account-create-tcrpp\" (UID: \"f7834cb6-728c-43bb-8de6-47dd0ed632ca\") " pod="openstack/heat-0352-account-create-tcrpp" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.413348 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7834cb6-728c-43bb-8de6-47dd0ed632ca-operator-scripts\") pod \"heat-0352-account-create-tcrpp\" (UID: \"f7834cb6-728c-43bb-8de6-47dd0ed632ca\") " pod="openstack/heat-0352-account-create-tcrpp" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.420534 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-wlqvh" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.429556 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ln9lp\" (UniqueName: \"kubernetes.io/projected/f7834cb6-728c-43bb-8de6-47dd0ed632ca-kube-api-access-ln9lp\") pod \"heat-0352-account-create-tcrpp\" (UID: \"f7834cb6-728c-43bb-8de6-47dd0ed632ca\") " pod="openstack/heat-0352-account-create-tcrpp" Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.510855 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-0352-account-create-tcrpp" Nov 25 10:34:48 crc kubenswrapper[4932]: W1125 10:34:48.900611 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65dc564e_efa5_4236_bcb5_fb93b7b22df6.slice/crio-5d77f4b43842e3fb0d919b731542f9939fca6e59db7566c30f15da42619e4950 WatchSource:0}: Error finding container 5d77f4b43842e3fb0d919b731542f9939fca6e59db7566c30f15da42619e4950: Status 404 returned error can't find the container with id 5d77f4b43842e3fb0d919b731542f9939fca6e59db7566c30f15da42619e4950 Nov 25 10:34:48 crc kubenswrapper[4932]: I1125 10:34:48.901896 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-wlqvh"] Nov 25 10:34:49 crc kubenswrapper[4932]: I1125 10:34:49.032127 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5d9c956f9d-95gps" event={"ID":"771edf61-8fc3-443b-855c-848aa101293f","Type":"ContainerStarted","Data":"4c70b0781d9ab15fadffe7828c70371d3d0c303c61b212f369344d03dd4d0f6a"} Nov 25 10:34:49 crc kubenswrapper[4932]: I1125 10:34:49.034601 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-wlqvh" event={"ID":"65dc564e-efa5-4236-bcb5-fb93b7b22df6","Type":"ContainerStarted","Data":"5d77f4b43842e3fb0d919b731542f9939fca6e59db7566c30f15da42619e4950"} Nov 25 10:34:49 crc kubenswrapper[4932]: I1125 10:34:49.040986 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-0352-account-create-tcrpp"] Nov 25 10:34:49 crc kubenswrapper[4932]: I1125 10:34:49.057306 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5d9c956f9d-95gps" podStartSLOduration=3.057086598 podStartE2EDuration="3.057086598s" podCreationTimestamp="2025-11-25 10:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:34:49.052911159 +0000 UTC m=+6349.178940742" watchObservedRunningTime="2025-11-25 10:34:49.057086598 +0000 UTC m=+6349.183116161" Nov 25 10:34:50 crc kubenswrapper[4932]: I1125 10:34:50.045570 4932 generic.go:334] "Generic (PLEG): container finished" podID="f7834cb6-728c-43bb-8de6-47dd0ed632ca" containerID="69e9b29925388e2c1f8d386bd743052dff9fc2b449bb745ad81008eb92c54947" exitCode=0 Nov 25 10:34:50 crc kubenswrapper[4932]: I1125 10:34:50.045668 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-0352-account-create-tcrpp" event={"ID":"f7834cb6-728c-43bb-8de6-47dd0ed632ca","Type":"ContainerDied","Data":"69e9b29925388e2c1f8d386bd743052dff9fc2b449bb745ad81008eb92c54947"} Nov 25 10:34:50 crc kubenswrapper[4932]: I1125 10:34:50.045972 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-0352-account-create-tcrpp" event={"ID":"f7834cb6-728c-43bb-8de6-47dd0ed632ca","Type":"ContainerStarted","Data":"03454b263f8b63862562315ecc1bbd94baa9d3b58abebfbaa5f88aa7e3783978"} Nov 25 10:34:50 crc kubenswrapper[4932]: I1125 10:34:50.047877 4932 generic.go:334] "Generic (PLEG): container finished" podID="65dc564e-efa5-4236-bcb5-fb93b7b22df6" containerID="ff85de16609fd3055a115b7345feabb30aa61191df647dc37ee363f61074516b" exitCode=0 Nov 25 10:34:50 crc kubenswrapper[4932]: I1125 10:34:50.047945 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-wlqvh" 
event={"ID":"65dc564e-efa5-4236-bcb5-fb93b7b22df6","Type":"ContainerDied","Data":"ff85de16609fd3055a115b7345feabb30aa61191df647dc37ee363f61074516b"} Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.480658 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-wlqvh" Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.497853 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-0352-account-create-tcrpp" Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.599097 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7834cb6-728c-43bb-8de6-47dd0ed632ca-operator-scripts\") pod \"f7834cb6-728c-43bb-8de6-47dd0ed632ca\" (UID: \"f7834cb6-728c-43bb-8de6-47dd0ed632ca\") " Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.599182 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ln9lp\" (UniqueName: \"kubernetes.io/projected/f7834cb6-728c-43bb-8de6-47dd0ed632ca-kube-api-access-ln9lp\") pod \"f7834cb6-728c-43bb-8de6-47dd0ed632ca\" (UID: \"f7834cb6-728c-43bb-8de6-47dd0ed632ca\") " Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.599284 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65dc564e-efa5-4236-bcb5-fb93b7b22df6-operator-scripts\") pod \"65dc564e-efa5-4236-bcb5-fb93b7b22df6\" (UID: \"65dc564e-efa5-4236-bcb5-fb93b7b22df6\") " Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.599355 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf65w\" (UniqueName: \"kubernetes.io/projected/65dc564e-efa5-4236-bcb5-fb93b7b22df6-kube-api-access-gf65w\") pod \"65dc564e-efa5-4236-bcb5-fb93b7b22df6\" (UID: \"65dc564e-efa5-4236-bcb5-fb93b7b22df6\") " Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.599610 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7834cb6-728c-43bb-8de6-47dd0ed632ca-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f7834cb6-728c-43bb-8de6-47dd0ed632ca" (UID: "f7834cb6-728c-43bb-8de6-47dd0ed632ca"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.599933 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7834cb6-728c-43bb-8de6-47dd0ed632ca-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.600019 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65dc564e-efa5-4236-bcb5-fb93b7b22df6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "65dc564e-efa5-4236-bcb5-fb93b7b22df6" (UID: "65dc564e-efa5-4236-bcb5-fb93b7b22df6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.606425 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65dc564e-efa5-4236-bcb5-fb93b7b22df6-kube-api-access-gf65w" (OuterVolumeSpecName: "kube-api-access-gf65w") pod "65dc564e-efa5-4236-bcb5-fb93b7b22df6" (UID: "65dc564e-efa5-4236-bcb5-fb93b7b22df6"). InnerVolumeSpecName "kube-api-access-gf65w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.606464 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7834cb6-728c-43bb-8de6-47dd0ed632ca-kube-api-access-ln9lp" (OuterVolumeSpecName: "kube-api-access-ln9lp") pod "f7834cb6-728c-43bb-8de6-47dd0ed632ca" (UID: "f7834cb6-728c-43bb-8de6-47dd0ed632ca"). InnerVolumeSpecName "kube-api-access-ln9lp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.702284 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf65w\" (UniqueName: \"kubernetes.io/projected/65dc564e-efa5-4236-bcb5-fb93b7b22df6-kube-api-access-gf65w\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.702513 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ln9lp\" (UniqueName: \"kubernetes.io/projected/f7834cb6-728c-43bb-8de6-47dd0ed632ca-kube-api-access-ln9lp\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:51 crc kubenswrapper[4932]: I1125 10:34:51.703218 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65dc564e-efa5-4236-bcb5-fb93b7b22df6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:34:52 crc kubenswrapper[4932]: I1125 10:34:52.089767 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-wlqvh" event={"ID":"65dc564e-efa5-4236-bcb5-fb93b7b22df6","Type":"ContainerDied","Data":"5d77f4b43842e3fb0d919b731542f9939fca6e59db7566c30f15da42619e4950"} Nov 25 10:34:52 crc kubenswrapper[4932]: I1125 10:34:52.089991 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d77f4b43842e3fb0d919b731542f9939fca6e59db7566c30f15da42619e4950" Nov 25 10:34:52 crc kubenswrapper[4932]: I1125 10:34:52.089801 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-wlqvh" Nov 25 10:34:52 crc kubenswrapper[4932]: I1125 10:34:52.099075 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-0352-account-create-tcrpp" event={"ID":"f7834cb6-728c-43bb-8de6-47dd0ed632ca","Type":"ContainerDied","Data":"03454b263f8b63862562315ecc1bbd94baa9d3b58abebfbaa5f88aa7e3783978"} Nov 25 10:34:52 crc kubenswrapper[4932]: I1125 10:34:52.099117 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03454b263f8b63862562315ecc1bbd94baa9d3b58abebfbaa5f88aa7e3783978" Nov 25 10:34:52 crc kubenswrapper[4932]: I1125 10:34:52.099171 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-0352-account-create-tcrpp" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.290081 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-jz9v2"] Nov 25 10:34:53 crc kubenswrapper[4932]: E1125 10:34:53.292289 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7834cb6-728c-43bb-8de6-47dd0ed632ca" containerName="mariadb-account-create" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.292386 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7834cb6-728c-43bb-8de6-47dd0ed632ca" containerName="mariadb-account-create" Nov 25 10:34:53 crc kubenswrapper[4932]: E1125 10:34:53.292521 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65dc564e-efa5-4236-bcb5-fb93b7b22df6" containerName="mariadb-database-create" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.292590 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="65dc564e-efa5-4236-bcb5-fb93b7b22df6" containerName="mariadb-database-create" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.292882 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="65dc564e-efa5-4236-bcb5-fb93b7b22df6" containerName="mariadb-database-create" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.292982 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7834cb6-728c-43bb-8de6-47dd0ed632ca" containerName="mariadb-account-create" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.293904 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-jz9v2" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.296941 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-zpw6x" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.300920 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-jz9v2"] Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.302664 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.436397 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdfph\" (UniqueName: \"kubernetes.io/projected/53043b1f-cee1-4f76-a0aa-7510ed0f3722-kube-api-access-gdfph\") pod \"heat-db-sync-jz9v2\" (UID: \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\") " pod="openstack/heat-db-sync-jz9v2" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.436557 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53043b1f-cee1-4f76-a0aa-7510ed0f3722-combined-ca-bundle\") pod \"heat-db-sync-jz9v2\" (UID: \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\") " pod="openstack/heat-db-sync-jz9v2" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.436763 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53043b1f-cee1-4f76-a0aa-7510ed0f3722-config-data\") pod \"heat-db-sync-jz9v2\" (UID: \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\") " pod="openstack/heat-db-sync-jz9v2" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.538602 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53043b1f-cee1-4f76-a0aa-7510ed0f3722-config-data\") pod 
\"heat-db-sync-jz9v2\" (UID: \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\") " pod="openstack/heat-db-sync-jz9v2" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.538676 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdfph\" (UniqueName: \"kubernetes.io/projected/53043b1f-cee1-4f76-a0aa-7510ed0f3722-kube-api-access-gdfph\") pod \"heat-db-sync-jz9v2\" (UID: \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\") " pod="openstack/heat-db-sync-jz9v2" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.538732 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53043b1f-cee1-4f76-a0aa-7510ed0f3722-combined-ca-bundle\") pod \"heat-db-sync-jz9v2\" (UID: \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\") " pod="openstack/heat-db-sync-jz9v2" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.544711 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53043b1f-cee1-4f76-a0aa-7510ed0f3722-config-data\") pod \"heat-db-sync-jz9v2\" (UID: \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\") " pod="openstack/heat-db-sync-jz9v2" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.547252 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53043b1f-cee1-4f76-a0aa-7510ed0f3722-combined-ca-bundle\") pod \"heat-db-sync-jz9v2\" (UID: \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\") " pod="openstack/heat-db-sync-jz9v2" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.556167 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdfph\" (UniqueName: \"kubernetes.io/projected/53043b1f-cee1-4f76-a0aa-7510ed0f3722-kube-api-access-gdfph\") pod \"heat-db-sync-jz9v2\" (UID: \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\") " pod="openstack/heat-db-sync-jz9v2" Nov 25 10:34:53 crc kubenswrapper[4932]: I1125 10:34:53.616360 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-jz9v2" Nov 25 10:34:54 crc kubenswrapper[4932]: I1125 10:34:54.104831 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-jz9v2"] Nov 25 10:34:54 crc kubenswrapper[4932]: I1125 10:34:54.112635 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:34:55 crc kubenswrapper[4932]: I1125 10:34:55.133877 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-jz9v2" event={"ID":"53043b1f-cee1-4f76-a0aa-7510ed0f3722","Type":"ContainerStarted","Data":"f1570ebe16a52d5911759b54ef865d52b4cfe76d7cc0a384ff08f16f62b777e0"} Nov 25 10:34:57 crc kubenswrapper[4932]: I1125 10:34:57.113866 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:34:57 crc kubenswrapper[4932]: I1125 10:34:57.114124 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5d9c956f9d-95gps" Nov 25 10:35:04 crc kubenswrapper[4932]: I1125 10:35:04.224310 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-jz9v2" event={"ID":"53043b1f-cee1-4f76-a0aa-7510ed0f3722","Type":"ContainerStarted","Data":"1bda477b5ead6a6d0e524e1b3e6cb8295051fc3949cc34bb7345125e81fd1141"} Nov 25 10:35:04 crc kubenswrapper[4932]: I1125 10:35:04.247136 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-jz9v2" podStartSLOduration=2.371874685 podStartE2EDuration="11.24711063s" podCreationTimestamp="2025-11-25 10:34:53 +0000 UTC" firstStartedPulling="2025-11-25 10:34:54.112374039 +0000 UTC m=+6354.238403692" lastFinishedPulling="2025-11-25 10:35:02.987610074 +0000 UTC m=+6363.113639637" observedRunningTime="2025-11-25 10:35:04.242497738 +0000 UTC m=+6364.368527301" watchObservedRunningTime="2025-11-25 10:35:04.24711063 +0000 UTC m=+6364.373140193" Nov 25 10:35:05 crc kubenswrapper[4932]: I1125 10:35:05.234713 4932 generic.go:334] "Generic (PLEG): container finished" podID="53043b1f-cee1-4f76-a0aa-7510ed0f3722" containerID="1bda477b5ead6a6d0e524e1b3e6cb8295051fc3949cc34bb7345125e81fd1141" exitCode=0 Nov 25 10:35:05 crc kubenswrapper[4932]: I1125 10:35:05.234797 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-jz9v2" event={"ID":"53043b1f-cee1-4f76-a0aa-7510ed0f3722","Type":"ContainerDied","Data":"1bda477b5ead6a6d0e524e1b3e6cb8295051fc3949cc34bb7345125e81fd1141"} Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.181545 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.182547 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.522815 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" podUID="96d031ad-3550-4423-9422-93911c9a8217" containerName="manager" probeResult="failure" 
output="Get \"http://10.217.0.76:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.524345 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" podUID="96d031ad-3550-4423-9422-93911c9a8217" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.76:8081/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.524389 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5d9c956f9d-95gps" podUID="771edf61-8fc3-443b-855c-848aa101293f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.131:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.131:8443: connect: connection refused" Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.825412 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-jz9v2" Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.858931 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53043b1f-cee1-4f76-a0aa-7510ed0f3722-combined-ca-bundle\") pod \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\" (UID: \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\") " Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.859018 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53043b1f-cee1-4f76-a0aa-7510ed0f3722-config-data\") pod \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\" (UID: \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\") " Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.859058 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdfph\" (UniqueName: \"kubernetes.io/projected/53043b1f-cee1-4f76-a0aa-7510ed0f3722-kube-api-access-gdfph\") pod \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\" (UID: \"53043b1f-cee1-4f76-a0aa-7510ed0f3722\") " Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.866407 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53043b1f-cee1-4f76-a0aa-7510ed0f3722-kube-api-access-gdfph" (OuterVolumeSpecName: "kube-api-access-gdfph") pod "53043b1f-cee1-4f76-a0aa-7510ed0f3722" (UID: "53043b1f-cee1-4f76-a0aa-7510ed0f3722"). InnerVolumeSpecName "kube-api-access-gdfph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.945419 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53043b1f-cee1-4f76-a0aa-7510ed0f3722-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "53043b1f-cee1-4f76-a0aa-7510ed0f3722" (UID: "53043b1f-cee1-4f76-a0aa-7510ed0f3722"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.954847 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53043b1f-cee1-4f76-a0aa-7510ed0f3722-config-data" (OuterVolumeSpecName: "config-data") pod "53043b1f-cee1-4f76-a0aa-7510ed0f3722" (UID: "53043b1f-cee1-4f76-a0aa-7510ed0f3722"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.970750 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53043b1f-cee1-4f76-a0aa-7510ed0f3722-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.970795 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53043b1f-cee1-4f76-a0aa-7510ed0f3722-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:07 crc kubenswrapper[4932]: I1125 10:35:07.970808 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gdfph\" (UniqueName: \"kubernetes.io/projected/53043b1f-cee1-4f76-a0aa-7510ed0f3722-kube-api-access-gdfph\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:08 crc kubenswrapper[4932]: I1125 10:35:08.579875 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-jz9v2" event={"ID":"53043b1f-cee1-4f76-a0aa-7510ed0f3722","Type":"ContainerDied","Data":"f1570ebe16a52d5911759b54ef865d52b4cfe76d7cc0a384ff08f16f62b777e0"} Nov 25 10:35:08 crc kubenswrapper[4932]: I1125 10:35:08.579923 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1570ebe16a52d5911759b54ef865d52b4cfe76d7cc0a384ff08f16f62b777e0" Nov 25 10:35:08 crc kubenswrapper[4932]: I1125 10:35:08.579949 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-jz9v2" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.481381 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-846f7bf768-bdlxf"] Nov 25 10:35:09 crc kubenswrapper[4932]: E1125 10:35:09.485332 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53043b1f-cee1-4f76-a0aa-7510ed0f3722" containerName="heat-db-sync" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.485371 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="53043b1f-cee1-4f76-a0aa-7510ed0f3722" containerName="heat-db-sync" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.485714 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="53043b1f-cee1-4f76-a0aa-7510ed0f3722" containerName="heat-db-sync" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.486441 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.488253 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.488501 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-zpw6x" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.491143 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.499419 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdgkp\" (UniqueName: \"kubernetes.io/projected/a6480479-6f69-4d9c-80a6-8b2269df40fc-kube-api-access-bdgkp\") pod \"heat-engine-846f7bf768-bdlxf\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.499480 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-config-data\") pod \"heat-engine-846f7bf768-bdlxf\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.499662 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-combined-ca-bundle\") pod \"heat-engine-846f7bf768-bdlxf\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.499715 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-config-data-custom\") pod \"heat-engine-846f7bf768-bdlxf\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.506422 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-846f7bf768-bdlxf"] Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.602817 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-combined-ca-bundle\") pod \"heat-engine-846f7bf768-bdlxf\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.602907 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-config-data-custom\") pod \"heat-engine-846f7bf768-bdlxf\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.603165 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdgkp\" (UniqueName: \"kubernetes.io/projected/a6480479-6f69-4d9c-80a6-8b2269df40fc-kube-api-access-bdgkp\") pod \"heat-engine-846f7bf768-bdlxf\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " 
pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.603241 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-config-data\") pod \"heat-engine-846f7bf768-bdlxf\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.610772 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-combined-ca-bundle\") pod \"heat-engine-846f7bf768-bdlxf\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.641540 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdgkp\" (UniqueName: \"kubernetes.io/projected/a6480479-6f69-4d9c-80a6-8b2269df40fc-kube-api-access-bdgkp\") pod \"heat-engine-846f7bf768-bdlxf\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.658304 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-57d554f45b-2jhr6"] Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.660097 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.661885 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-config-data\") pod \"heat-engine-846f7bf768-bdlxf\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.664706 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.683445 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-config-data-custom\") pod \"heat-engine-846f7bf768-bdlxf\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.699955 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-57d554f45b-2jhr6"] Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.734551 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7888997d94-lljtp"] Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.735885 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.739660 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.743803 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7888997d94-lljtp"] Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.813157 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.825570 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zxpb\" (UniqueName: \"kubernetes.io/projected/98767fd7-2f73-47e3-9f65-b3e08c558b6f-kube-api-access-4zxpb\") pod \"heat-cfnapi-7888997d94-lljtp\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") " pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.825677 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-config-data-custom\") pod \"heat-cfnapi-7888997d94-lljtp\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") " pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.825819 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-combined-ca-bundle\") pod \"heat-api-57d554f45b-2jhr6\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") " pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.825939 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-combined-ca-bundle\") pod \"heat-cfnapi-7888997d94-lljtp\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") " pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.826072 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-config-data\") pod \"heat-api-57d554f45b-2jhr6\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") " pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.826094 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-config-data\") pod \"heat-cfnapi-7888997d94-lljtp\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") " pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.826130 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zw8cp\" (UniqueName: \"kubernetes.io/projected/cb7b39c9-c205-48e2-8d32-4028deddc8b5-kube-api-access-zw8cp\") pod \"heat-api-57d554f45b-2jhr6\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") " pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.826157 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-config-data-custom\") pod \"heat-api-57d554f45b-2jhr6\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") " pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.935076 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-config-data\") pod \"heat-api-57d554f45b-2jhr6\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") " pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.935110 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-config-data\") pod \"heat-cfnapi-7888997d94-lljtp\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") " pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.935139 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zw8cp\" (UniqueName: \"kubernetes.io/projected/cb7b39c9-c205-48e2-8d32-4028deddc8b5-kube-api-access-zw8cp\") pod \"heat-api-57d554f45b-2jhr6\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") " pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.935162 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-config-data-custom\") pod \"heat-api-57d554f45b-2jhr6\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") " pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.935243 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zxpb\" (UniqueName: \"kubernetes.io/projected/98767fd7-2f73-47e3-9f65-b3e08c558b6f-kube-api-access-4zxpb\") pod \"heat-cfnapi-7888997d94-lljtp\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") " pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.935289 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-config-data-custom\") pod \"heat-cfnapi-7888997d94-lljtp\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") " pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.936506 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-combined-ca-bundle\") pod \"heat-api-57d554f45b-2jhr6\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") " pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.936567 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-combined-ca-bundle\") pod \"heat-cfnapi-7888997d94-lljtp\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") " pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.941590 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-config-data-custom\") pod \"heat-api-57d554f45b-2jhr6\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") " pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.942264 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-config-data-custom\") pod \"heat-cfnapi-7888997d94-lljtp\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") " pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.944688 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-config-data\") pod \"heat-api-57d554f45b-2jhr6\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") " pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.945344 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-combined-ca-bundle\") pod \"heat-cfnapi-7888997d94-lljtp\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") " pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.945944 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-config-data\") pod \"heat-cfnapi-7888997d94-lljtp\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") " pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.961907 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zxpb\" (UniqueName: \"kubernetes.io/projected/98767fd7-2f73-47e3-9f65-b3e08c558b6f-kube-api-access-4zxpb\") pod \"heat-cfnapi-7888997d94-lljtp\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") " pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.962104 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-combined-ca-bundle\") pod \"heat-api-57d554f45b-2jhr6\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") " pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:09 crc kubenswrapper[4932]: I1125 10:35:09.980963 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zw8cp\" (UniqueName: \"kubernetes.io/projected/cb7b39c9-c205-48e2-8d32-4028deddc8b5-kube-api-access-zw8cp\") pod \"heat-api-57d554f45b-2jhr6\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") " pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:10 crc kubenswrapper[4932]: I1125 10:35:10.098896 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:10 crc kubenswrapper[4932]: I1125 10:35:10.114114 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:10 crc kubenswrapper[4932]: I1125 10:35:10.321862 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-846f7bf768-bdlxf"] Nov 25 10:35:10 crc kubenswrapper[4932]: W1125 10:35:10.331680 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda6480479_6f69_4d9c_80a6_8b2269df40fc.slice/crio-e8dd5adcb24804a263ac4f82e836aaae04623de8dc06eec26ef1689ada53feec WatchSource:0}: Error finding container e8dd5adcb24804a263ac4f82e836aaae04623de8dc06eec26ef1689ada53feec: Status 404 returned error can't find the container with id e8dd5adcb24804a263ac4f82e836aaae04623de8dc06eec26ef1689ada53feec Nov 25 10:35:10 crc kubenswrapper[4932]: I1125 10:35:10.600651 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-846f7bf768-bdlxf" event={"ID":"a6480479-6f69-4d9c-80a6-8b2269df40fc","Type":"ContainerStarted","Data":"768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4"} Nov 25 10:35:10 crc kubenswrapper[4932]: I1125 10:35:10.600958 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-846f7bf768-bdlxf" event={"ID":"a6480479-6f69-4d9c-80a6-8b2269df40fc","Type":"ContainerStarted","Data":"e8dd5adcb24804a263ac4f82e836aaae04623de8dc06eec26ef1689ada53feec"} Nov 25 10:35:10 crc kubenswrapper[4932]: I1125 10:35:10.600992 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:10 crc kubenswrapper[4932]: W1125 10:35:10.647679 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod98767fd7_2f73_47e3_9f65_b3e08c558b6f.slice/crio-447f5ac4806f2772598b9e302827fcb9992177a8ec926d680b67d13c5d103916 WatchSource:0}: Error finding container 447f5ac4806f2772598b9e302827fcb9992177a8ec926d680b67d13c5d103916: Status 404 returned error can't find the container with id 447f5ac4806f2772598b9e302827fcb9992177a8ec926d680b67d13c5d103916 Nov 25 10:35:10 crc kubenswrapper[4932]: I1125 10:35:10.648137 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-846f7bf768-bdlxf" podStartSLOduration=1.648119634 podStartE2EDuration="1.648119634s" podCreationTimestamp="2025-11-25 10:35:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:35:10.637977143 +0000 UTC m=+6370.764006716" watchObservedRunningTime="2025-11-25 10:35:10.648119634 +0000 UTC m=+6370.774149187" Nov 25 10:35:10 crc kubenswrapper[4932]: I1125 10:35:10.654081 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7888997d94-lljtp"] Nov 25 10:35:10 crc kubenswrapper[4932]: I1125 10:35:10.711639 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-57d554f45b-2jhr6"] Nov 25 10:35:11 crc kubenswrapper[4932]: I1125 10:35:11.626039 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-57d554f45b-2jhr6" event={"ID":"cb7b39c9-c205-48e2-8d32-4028deddc8b5","Type":"ContainerStarted","Data":"558c2f6c21301bc6b2f872a1b747d7311f3eabba44587f86b8cc9df564ea73aa"} Nov 25 10:35:11 crc kubenswrapper[4932]: I1125 10:35:11.628062 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7888997d94-lljtp" 
event={"ID":"98767fd7-2f73-47e3-9f65-b3e08c558b6f","Type":"ContainerStarted","Data":"447f5ac4806f2772598b9e302827fcb9992177a8ec926d680b67d13c5d103916"} Nov 25 10:35:11 crc kubenswrapper[4932]: I1125 10:35:11.904019 4932 scope.go:117] "RemoveContainer" containerID="cd97c76aa1c46dbbc0948e04e2b05305c6fc14ed57dbf2f5d5635dd00e7eeb94" Nov 25 10:35:15 crc kubenswrapper[4932]: I1125 10:35:15.288587 4932 scope.go:117] "RemoveContainer" containerID="05d101558d87b65a0935ca3040aae225d8b500b789164b830ff10b54c74cd860" Nov 25 10:35:15 crc kubenswrapper[4932]: I1125 10:35:15.339296 4932 scope.go:117] "RemoveContainer" containerID="cac2c89ed9e7906ab866e883a38db59eb2d2e0716dc7661b0897b4db7f840df5" Nov 25 10:35:16 crc kubenswrapper[4932]: I1125 10:35:16.679498 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7888997d94-lljtp" event={"ID":"98767fd7-2f73-47e3-9f65-b3e08c558b6f","Type":"ContainerStarted","Data":"5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a"} Nov 25 10:35:16 crc kubenswrapper[4932]: I1125 10:35:16.680080 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7888997d94-lljtp" Nov 25 10:35:16 crc kubenswrapper[4932]: I1125 10:35:16.682515 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-57d554f45b-2jhr6" event={"ID":"cb7b39c9-c205-48e2-8d32-4028deddc8b5","Type":"ContainerStarted","Data":"4b05d2c5e5a46a193fac08bfc6e67da9fbe4f6a3c24038ca987ee7c500695fb2"} Nov 25 10:35:16 crc kubenswrapper[4932]: I1125 10:35:16.682681 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-57d554f45b-2jhr6" Nov 25 10:35:16 crc kubenswrapper[4932]: I1125 10:35:16.700676 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7888997d94-lljtp" podStartSLOduration=3.062307465 podStartE2EDuration="7.700660486s" podCreationTimestamp="2025-11-25 10:35:09 +0000 UTC" firstStartedPulling="2025-11-25 10:35:10.65078167 +0000 UTC m=+6370.776811233" lastFinishedPulling="2025-11-25 10:35:15.289134691 +0000 UTC m=+6375.415164254" observedRunningTime="2025-11-25 10:35:16.69832968 +0000 UTC m=+6376.824359263" watchObservedRunningTime="2025-11-25 10:35:16.700660486 +0000 UTC m=+6376.826690049" Nov 25 10:35:16 crc kubenswrapper[4932]: I1125 10:35:16.715078 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-57d554f45b-2jhr6" podStartSLOduration=3.142545071 podStartE2EDuration="7.715059539s" podCreationTimestamp="2025-11-25 10:35:09 +0000 UTC" firstStartedPulling="2025-11-25 10:35:10.717662533 +0000 UTC m=+6370.843692096" lastFinishedPulling="2025-11-25 10:35:15.290177001 +0000 UTC m=+6375.416206564" observedRunningTime="2025-11-25 10:35:16.712168296 +0000 UTC m=+6376.838197879" watchObservedRunningTime="2025-11-25 10:35:16.715059539 +0000 UTC m=+6376.841089102" Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.046522 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-694766f497-mk7kp"] Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.048169 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-694766f497-mk7kp" Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.062707 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-69d5fd574b-t8kpc"] Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.064457 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.074816 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-694766f497-mk7kp"]
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.088501 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-557dbd6b8b-rp6cj"]
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.097264 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-557dbd6b8b-rp6cj"]
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.099247 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.218599 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-config-data-custom\") pod \"heat-cfnapi-69d5fd574b-t8kpc\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") " pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.218672 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-config-data-custom\") pod \"heat-api-557dbd6b8b-rp6cj\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") " pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.218751 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-combined-ca-bundle\") pod \"heat-api-557dbd6b8b-rp6cj\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") " pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.218773 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dlljd\" (UniqueName: \"kubernetes.io/projected/354fa154-1782-491f-9aa3-a3e1b6d755d2-kube-api-access-dlljd\") pod \"heat-engine-694766f497-mk7kp\" (UID: \"354fa154-1782-491f-9aa3-a3e1b6d755d2\") " pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.218804 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-config-data\") pod \"heat-api-557dbd6b8b-rp6cj\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") " pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.218821 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jk5tg\" (UniqueName: \"kubernetes.io/projected/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-kube-api-access-jk5tg\") pod \"heat-api-557dbd6b8b-rp6cj\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") " pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.218845 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/354fa154-1782-491f-9aa3-a3e1b6d755d2-combined-ca-bundle\") pod \"heat-engine-694766f497-mk7kp\" (UID: \"354fa154-1782-491f-9aa3-a3e1b6d755d2\") " pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.218952 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h98zj\" (UniqueName: \"kubernetes.io/projected/33022877-bd26-4a1c-8efd-afa1d50af04e-kube-api-access-h98zj\") pod \"heat-cfnapi-69d5fd574b-t8kpc\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") " pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.219015 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/354fa154-1782-491f-9aa3-a3e1b6d755d2-config-data\") pod \"heat-engine-694766f497-mk7kp\" (UID: \"354fa154-1782-491f-9aa3-a3e1b6d755d2\") " pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.219109 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-combined-ca-bundle\") pod \"heat-cfnapi-69d5fd574b-t8kpc\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") " pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.219163 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-config-data\") pod \"heat-cfnapi-69d5fd574b-t8kpc\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") " pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.219229 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/354fa154-1782-491f-9aa3-a3e1b6d755d2-config-data-custom\") pod \"heat-engine-694766f497-mk7kp\" (UID: \"354fa154-1782-491f-9aa3-a3e1b6d755d2\") " pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.304293 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-69d5fd574b-t8kpc"]
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.320890 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-combined-ca-bundle\") pod \"heat-api-557dbd6b8b-rp6cj\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") " pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.320943 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dlljd\" (UniqueName: \"kubernetes.io/projected/354fa154-1782-491f-9aa3-a3e1b6d755d2-kube-api-access-dlljd\") pod \"heat-engine-694766f497-mk7kp\" (UID: \"354fa154-1782-491f-9aa3-a3e1b6d755d2\") " pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.320996 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-config-data\") pod \"heat-api-557dbd6b8b-rp6cj\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") " pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.321025 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jk5tg\" (UniqueName: \"kubernetes.io/projected/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-kube-api-access-jk5tg\") pod \"heat-api-557dbd6b8b-rp6cj\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") " pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.321061 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/354fa154-1782-491f-9aa3-a3e1b6d755d2-combined-ca-bundle\") pod \"heat-engine-694766f497-mk7kp\" (UID: \"354fa154-1782-491f-9aa3-a3e1b6d755d2\") " pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.321097 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h98zj\" (UniqueName: \"kubernetes.io/projected/33022877-bd26-4a1c-8efd-afa1d50af04e-kube-api-access-h98zj\") pod \"heat-cfnapi-69d5fd574b-t8kpc\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") " pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.321130 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/354fa154-1782-491f-9aa3-a3e1b6d755d2-config-data\") pod \"heat-engine-694766f497-mk7kp\" (UID: \"354fa154-1782-491f-9aa3-a3e1b6d755d2\") " pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.321185 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-combined-ca-bundle\") pod \"heat-cfnapi-69d5fd574b-t8kpc\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") " pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.321370 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-config-data\") pod \"heat-cfnapi-69d5fd574b-t8kpc\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") " pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.321401 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/354fa154-1782-491f-9aa3-a3e1b6d755d2-config-data-custom\") pod \"heat-engine-694766f497-mk7kp\" (UID: \"354fa154-1782-491f-9aa3-a3e1b6d755d2\") " pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.321455 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-config-data-custom\") pod \"heat-cfnapi-69d5fd574b-t8kpc\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") " pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.321530 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-config-data-custom\") pod \"heat-api-557dbd6b8b-rp6cj\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") " pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.328626 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/354fa154-1782-491f-9aa3-a3e1b6d755d2-config-data\") pod \"heat-engine-694766f497-mk7kp\" (UID: \"354fa154-1782-491f-9aa3-a3e1b6d755d2\") " pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.328898 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-config-data-custom\") pod \"heat-api-557dbd6b8b-rp6cj\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") " pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.336946 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-config-data-custom\") pod \"heat-cfnapi-69d5fd574b-t8kpc\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") " pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.337240 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-config-data\") pod \"heat-cfnapi-69d5fd574b-t8kpc\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") " pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.337761 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/354fa154-1782-491f-9aa3-a3e1b6d755d2-config-data-custom\") pod \"heat-engine-694766f497-mk7kp\" (UID: \"354fa154-1782-491f-9aa3-a3e1b6d755d2\") " pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.338571 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-combined-ca-bundle\") pod \"heat-api-557dbd6b8b-rp6cj\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") " pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.340780 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-combined-ca-bundle\") pod \"heat-cfnapi-69d5fd574b-t8kpc\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") " pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.342491 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/354fa154-1782-491f-9aa3-a3e1b6d755d2-combined-ca-bundle\") pod \"heat-engine-694766f497-mk7kp\" (UID: \"354fa154-1782-491f-9aa3-a3e1b6d755d2\") " pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.343866 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dlljd\" (UniqueName: \"kubernetes.io/projected/354fa154-1782-491f-9aa3-a3e1b6d755d2-kube-api-access-dlljd\") pod \"heat-engine-694766f497-mk7kp\" (UID: \"354fa154-1782-491f-9aa3-a3e1b6d755d2\") " pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.345143 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h98zj\" (UniqueName: \"kubernetes.io/projected/33022877-bd26-4a1c-8efd-afa1d50af04e-kube-api-access-h98zj\") pod \"heat-cfnapi-69d5fd574b-t8kpc\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") " pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.346144 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jk5tg\" (UniqueName: \"kubernetes.io/projected/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-kube-api-access-jk5tg\") pod \"heat-api-557dbd6b8b-rp6cj\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") " pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.347050 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-config-data\") pod \"heat-api-557dbd6b8b-rp6cj\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") " pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.547346 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.547446 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:17 crc kubenswrapper[4932]: I1125 10:35:17.547491 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.156083 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-557dbd6b8b-rp6cj"]
Nov 25 10:35:18 crc kubenswrapper[4932]: W1125 10:35:18.156077 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod71ee76e8_fa49_4fe6_a13f_6d7c3c986db3.slice/crio-da2036357bdaaebacd373f708e804ca422e62cd3a8df17d56069c2a0fc22834c WatchSource:0}: Error finding container da2036357bdaaebacd373f708e804ca422e62cd3a8df17d56069c2a0fc22834c: Status 404 returned error can't find the container with id da2036357bdaaebacd373f708e804ca422e62cd3a8df17d56069c2a0fc22834c
Nov 25 10:35:18 crc kubenswrapper[4932]: W1125 10:35:18.162470 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod354fa154_1782_491f_9aa3_a3e1b6d755d2.slice/crio-2ccdc64d6ecba5e82d4767b21da7adaff32f28e6b29c043e129b9153674d932e WatchSource:0}: Error finding container 2ccdc64d6ecba5e82d4767b21da7adaff32f28e6b29c043e129b9153674d932e: Status 404 returned error can't find the container with id 2ccdc64d6ecba5e82d4767b21da7adaff32f28e6b29c043e129b9153674d932e
Nov 25 10:35:18 crc kubenswrapper[4932]: W1125 10:35:18.171415 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33022877_bd26_4a1c_8efd_afa1d50af04e.slice/crio-083dca86548a1593a0dae23a89d61d49af384034373c307126a3bbfea8343953 WatchSource:0}: Error finding container 083dca86548a1593a0dae23a89d61d49af384034373c307126a3bbfea8343953: Status 404 returned error can't find the container with id 083dca86548a1593a0dae23a89d61d49af384034373c307126a3bbfea8343953
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.173772 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-694766f497-mk7kp"]
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.184478 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-69d5fd574b-t8kpc"]
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.417276 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-57d554f45b-2jhr6"]
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.426778 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7888997d94-lljtp"]
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.437290 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-7b8b7647f7-b7n2s"]
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.439007 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.453710 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.453898 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.457517 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-57dc46b8c-d5hhg"]
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.459348 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.465754 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.466092 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.487445 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7b8b7647f7-b7n2s"]
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.500071 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-57dc46b8c-d5hhg"]
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.562013 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4g2n\" (UniqueName: \"kubernetes.io/projected/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-kube-api-access-m4g2n\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.562060 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-combined-ca-bundle\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.562113 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-public-tls-certs\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.562134 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-internal-tls-certs\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.562240 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-combined-ca-bundle\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.562267 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2s6g\" (UniqueName: \"kubernetes.io/projected/cc9cd39b-0321-4858-9e14-88cbe5cf3013-kube-api-access-x2s6g\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.562344 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-config-data\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.562397 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-internal-tls-certs\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.562574 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-config-data-custom\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.562680 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-public-tls-certs\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.562742 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-config-data-custom\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.562773 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-config-data\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.664553 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-config-data-custom\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.664696 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-config-data\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.665476 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4g2n\" (UniqueName: \"kubernetes.io/projected/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-kube-api-access-m4g2n\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.665503 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-combined-ca-bundle\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.665845 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-public-tls-certs\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.667148 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-internal-tls-certs\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.667280 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-combined-ca-bundle\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.667368 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2s6g\" (UniqueName: \"kubernetes.io/projected/cc9cd39b-0321-4858-9e14-88cbe5cf3013-kube-api-access-x2s6g\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.667644 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-config-data\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.667752 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-internal-tls-certs\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.667862 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-config-data-custom\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.669036 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-public-tls-certs\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.678899 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-internal-tls-certs\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.684061 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-combined-ca-bundle\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.684588 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-public-tls-certs\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.686590 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-config-data\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.688376 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-config-data\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.689644 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-config-data-custom\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.690050 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4g2n\" (UniqueName: \"kubernetes.io/projected/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-kube-api-access-m4g2n\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.690749 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-combined-ca-bundle\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.701002 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-public-tls-certs\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.701298 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2s6g\" (UniqueName: \"kubernetes.io/projected/cc9cd39b-0321-4858-9e14-88cbe5cf3013-kube-api-access-x2s6g\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.701919 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7557fdd5-57f2-4e76-8686-1f9e48ddb23d-internal-tls-certs\") pod \"heat-api-7b8b7647f7-b7n2s\" (UID: \"7557fdd5-57f2-4e76-8686-1f9e48ddb23d\") " pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.703509 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cc9cd39b-0321-4858-9e14-88cbe5cf3013-config-data-custom\") pod \"heat-cfnapi-57dc46b8c-d5hhg\" (UID: \"cc9cd39b-0321-4858-9e14-88cbe5cf3013\") " pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.713358 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-694766f497-mk7kp" event={"ID":"354fa154-1782-491f-9aa3-a3e1b6d755d2","Type":"ContainerStarted","Data":"2ccdc64d6ecba5e82d4767b21da7adaff32f28e6b29c043e129b9153674d932e"}
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.715543 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-557dbd6b8b-rp6cj" event={"ID":"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3","Type":"ContainerStarted","Data":"da2036357bdaaebacd373f708e804ca422e62cd3a8df17d56069c2a0fc22834c"}
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.716803 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-57d554f45b-2jhr6" podUID="cb7b39c9-c205-48e2-8d32-4028deddc8b5" containerName="heat-api" containerID="cri-o://4b05d2c5e5a46a193fac08bfc6e67da9fbe4f6a3c24038ca987ee7c500695fb2" gracePeriod=60
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.717798 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-7888997d94-lljtp" podUID="98767fd7-2f73-47e3-9f65-b3e08c558b6f" containerName="heat-cfnapi" containerID="cri-o://5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a" gracePeriod=60
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.718130 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc" event={"ID":"33022877-bd26-4a1c-8efd-afa1d50af04e","Type":"ContainerStarted","Data":"083dca86548a1593a0dae23a89d61d49af384034373c307126a3bbfea8343953"}
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.939970 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:18 crc kubenswrapper[4932]: I1125 10:35:18.951964 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.526885 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-7b8b7647f7-b7n2s"]
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.564064 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5d9c956f9d-95gps"
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.727178 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-57dc46b8c-d5hhg"]
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.727652 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7888997d94-lljtp"
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.779640 4932 generic.go:334] "Generic (PLEG): container finished" podID="71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" containerID="bd3f784effbaf7e6043dbf8b1941b0e9e7f8b2cb624d24c01ba0a7b29af5c2e2" exitCode=1
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.779721 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-557dbd6b8b-rp6cj" event={"ID":"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3","Type":"ContainerDied","Data":"bd3f784effbaf7e6043dbf8b1941b0e9e7f8b2cb624d24c01ba0a7b29af5c2e2"}
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.780366 4932 scope.go:117] "RemoveContainer" containerID="bd3f784effbaf7e6043dbf8b1941b0e9e7f8b2cb624d24c01ba0a7b29af5c2e2"
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.799450 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-config-data-custom\") pod \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") "
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.799536 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zxpb\" (UniqueName: \"kubernetes.io/projected/98767fd7-2f73-47e3-9f65-b3e08c558b6f-kube-api-access-4zxpb\") pod \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") "
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.799602 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-combined-ca-bundle\") pod \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") "
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.799644 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-config-data\") pod \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\" (UID: \"98767fd7-2f73-47e3-9f65-b3e08c558b6f\") "
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.802892 4932 generic.go:334] "Generic (PLEG): container finished" podID="98767fd7-2f73-47e3-9f65-b3e08c558b6f" containerID="5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a" exitCode=0
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.802977 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7888997d94-lljtp"
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.802986 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7888997d94-lljtp" event={"ID":"98767fd7-2f73-47e3-9f65-b3e08c558b6f","Type":"ContainerDied","Data":"5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a"}
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.803018 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7888997d94-lljtp" event={"ID":"98767fd7-2f73-47e3-9f65-b3e08c558b6f","Type":"ContainerDied","Data":"447f5ac4806f2772598b9e302827fcb9992177a8ec926d680b67d13c5d103916"}
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.803047 4932 scope.go:117] "RemoveContainer" containerID="5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a"
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.818924 4932 generic.go:334] "Generic (PLEG): container finished" podID="33022877-bd26-4a1c-8efd-afa1d50af04e" containerID="35a72bf51474190817026bbbf10efa200410922f6010f24210a7caa9aaef85dc" exitCode=1
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.819043 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc" event={"ID":"33022877-bd26-4a1c-8efd-afa1d50af04e","Type":"ContainerDied","Data":"35a72bf51474190817026bbbf10efa200410922f6010f24210a7caa9aaef85dc"}
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.820351 4932 scope.go:117] "RemoveContainer" containerID="35a72bf51474190817026bbbf10efa200410922f6010f24210a7caa9aaef85dc"
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.830147 4932 generic.go:334] "Generic (PLEG): container finished" podID="cb7b39c9-c205-48e2-8d32-4028deddc8b5" containerID="4b05d2c5e5a46a193fac08bfc6e67da9fbe4f6a3c24038ca987ee7c500695fb2" exitCode=0
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.830224 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-57d554f45b-2jhr6" event={"ID":"cb7b39c9-c205-48e2-8d32-4028deddc8b5","Type":"ContainerDied","Data":"4b05d2c5e5a46a193fac08bfc6e67da9fbe4f6a3c24038ca987ee7c500695fb2"}
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.835382 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b8b7647f7-b7n2s" event={"ID":"7557fdd5-57f2-4e76-8686-1f9e48ddb23d","Type":"ContainerStarted","Data":"75b7888541d2d3ea68005a7cc06c53bfbc1d128411e28a639718570616a8e2ae"}
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.838315 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98767fd7-2f73-47e3-9f65-b3e08c558b6f-kube-api-access-4zxpb" (OuterVolumeSpecName: "kube-api-access-4zxpb") pod "98767fd7-2f73-47e3-9f65-b3e08c558b6f" (UID: "98767fd7-2f73-47e3-9f65-b3e08c558b6f"). InnerVolumeSpecName "kube-api-access-4zxpb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.847231 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-694766f497-mk7kp" event={"ID":"354fa154-1782-491f-9aa3-a3e1b6d755d2","Type":"ContainerStarted","Data":"63180d1bc6b719bfd205b579b1dd1ccd79e974a5caa0a56674202484d9bad14b"}
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.847705 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-694766f497-mk7kp"
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.851406 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "98767fd7-2f73-47e3-9f65-b3e08c558b6f" (UID: "98767fd7-2f73-47e3-9f65-b3e08c558b6f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.881376 4932 scope.go:117] "RemoveContainer" containerID="5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a"
Nov 25 10:35:19 crc kubenswrapper[4932]: E1125 10:35:19.881899 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a\": container with ID starting with 5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a not found: ID does not exist" containerID="5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a"
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.881937 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a"} err="failed to get container status \"5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a\": rpc error: code = NotFound desc = could not find container \"5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a\": container with ID starting with 5ffff2b47ca714d06bd010b3bbfdb7df7b655c2e25daa97fafd18b058b7fdd2a not found: ID does not exist"
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.902806 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.902833 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zxpb\" (UniqueName: \"kubernetes.io/projected/98767fd7-2f73-47e3-9f65-b3e08c558b6f-kube-api-access-4zxpb\") on node \"crc\" DevicePath \"\""
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.917207 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-694766f497-mk7kp" podStartSLOduration=2.917167976 podStartE2EDuration="2.917167976s" podCreationTimestamp="2025-11-25 10:35:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:35:19.879635152 +0000 UTC m=+6380.005664715" watchObservedRunningTime="2025-11-25 10:35:19.917167976 +0000 UTC m=+6380.043197539"
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.928586 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-config-data" (OuterVolumeSpecName: "config-data") pod "98767fd7-2f73-47e3-9f65-b3e08c558b6f" (UID: "98767fd7-2f73-47e3-9f65-b3e08c558b6f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.934422 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "98767fd7-2f73-47e3-9f65-b3e08c558b6f" (UID: "98767fd7-2f73-47e3-9f65-b3e08c558b6f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:35:19 crc kubenswrapper[4932]: I1125 10:35:19.957386 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-57d554f45b-2jhr6"
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.004895 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.005246 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/98767fd7-2f73-47e3-9f65-b3e08c558b6f-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.106393 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-combined-ca-bundle\") pod \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") "
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.106544 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-config-data-custom\") pod \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") "
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.106616 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zw8cp\" (UniqueName: \"kubernetes.io/projected/cb7b39c9-c205-48e2-8d32-4028deddc8b5-kube-api-access-zw8cp\") pod \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") "
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.106874 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-config-data\") pod \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\" (UID: \"cb7b39c9-c205-48e2-8d32-4028deddc8b5\") "
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.118056 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "cb7b39c9-c205-48e2-8d32-4028deddc8b5" (UID: "cb7b39c9-c205-48e2-8d32-4028deddc8b5"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.118105 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb7b39c9-c205-48e2-8d32-4028deddc8b5-kube-api-access-zw8cp" (OuterVolumeSpecName: "kube-api-access-zw8cp") pod "cb7b39c9-c205-48e2-8d32-4028deddc8b5" (UID: "cb7b39c9-c205-48e2-8d32-4028deddc8b5"). InnerVolumeSpecName "kube-api-access-zw8cp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.161257 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-7888997d94-lljtp"]
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.184723 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-7888997d94-lljtp"]
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.200207 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb7b39c9-c205-48e2-8d32-4028deddc8b5" (UID: "cb7b39c9-c205-48e2-8d32-4028deddc8b5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.211201 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.211240 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.211253 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zw8cp\" (UniqueName: \"kubernetes.io/projected/cb7b39c9-c205-48e2-8d32-4028deddc8b5-kube-api-access-zw8cp\") on node \"crc\" DevicePath \"\""
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.222401 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-config-data" (OuterVolumeSpecName: "config-data") pod "cb7b39c9-c205-48e2-8d32-4028deddc8b5" (UID: "cb7b39c9-c205-48e2-8d32-4028deddc8b5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.314066 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb7b39c9-c205-48e2-8d32-4028deddc8b5-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.623183 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98767fd7-2f73-47e3-9f65-b3e08c558b6f" path="/var/lib/kubelet/pods/98767fd7-2f73-47e3-9f65-b3e08c558b6f/volumes"
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.859358 4932 generic.go:334] "Generic (PLEG): container finished" podID="71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" containerID="b824e8eb43552ec52d15e4c37301c2636f05e831aa9c41903867b8c5ab797b60" exitCode=1
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.859417 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-557dbd6b8b-rp6cj" event={"ID":"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3","Type":"ContainerDied","Data":"b824e8eb43552ec52d15e4c37301c2636f05e831aa9c41903867b8c5ab797b60"}
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.859450 4932 scope.go:117] "RemoveContainer" containerID="bd3f784effbaf7e6043dbf8b1941b0e9e7f8b2cb624d24c01ba0a7b29af5c2e2"
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.860089 4932 scope.go:117] "RemoveContainer" containerID="b824e8eb43552ec52d15e4c37301c2636f05e831aa9c41903867b8c5ab797b60"
Nov 25 10:35:20 crc kubenswrapper[4932]: E1125 10:35:20.860332 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-557dbd6b8b-rp6cj_openstack(71ee76e8-fa49-4fe6-a13f-6d7c3c986db3)\"" pod="openstack/heat-api-557dbd6b8b-rp6cj" podUID="71ee76e8-fa49-4fe6-a13f-6d7c3c986db3"
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.872919 4932 generic.go:334] "Generic (PLEG): container finished" podID="33022877-bd26-4a1c-8efd-afa1d50af04e" containerID="a23cb581cd2d42e3db3c14da47476a97e31cd878c5f181d89e58600645de822b" exitCode=1
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.872974 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc" event={"ID":"33022877-bd26-4a1c-8efd-afa1d50af04e","Type":"ContainerDied","Data":"a23cb581cd2d42e3db3c14da47476a97e31cd878c5f181d89e58600645de822b"}
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.873654 4932 scope.go:117] "RemoveContainer" containerID="a23cb581cd2d42e3db3c14da47476a97e31cd878c5f181d89e58600645de822b"
Nov 25 10:35:20 crc kubenswrapper[4932]: E1125 10:35:20.873902 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-69d5fd574b-t8kpc_openstack(33022877-bd26-4a1c-8efd-afa1d50af04e)\"" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc" podUID="33022877-bd26-4a1c-8efd-afa1d50af04e"
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.875233 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-57d554f45b-2jhr6" event={"ID":"cb7b39c9-c205-48e2-8d32-4028deddc8b5","Type":"ContainerDied","Data":"558c2f6c21301bc6b2f872a1b747d7311f3eabba44587f86b8cc9df564ea73aa"}
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.875299 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-57d554f45b-2jhr6"
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.876767 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-7b8b7647f7-b7n2s" event={"ID":"7557fdd5-57f2-4e76-8686-1f9e48ddb23d","Type":"ContainerStarted","Data":"778e33ddfbba31a20702fa9824e5c2397f1936943fb311e42e4ae99ee66389a0"}
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.877238 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.889385 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-57dc46b8c-d5hhg" event={"ID":"cc9cd39b-0321-4858-9e14-88cbe5cf3013","Type":"ContainerStarted","Data":"da73ef49b21f8a44a1e7390f5e4b87a5e335fb72d4361f2bac8d14b3f009ae3b"}
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.889434 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-57dc46b8c-d5hhg" event={"ID":"cc9cd39b-0321-4858-9e14-88cbe5cf3013","Type":"ContainerStarted","Data":"9c05a63224fc34fc8e9a57cf4d7c58cf51e74a8addb9447c35800e8c8c6b675b"}
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.890422 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.907130 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-7b8b7647f7-b7n2s" podStartSLOduration=2.90710933 podStartE2EDuration="2.90710933s" podCreationTimestamp="2025-11-25 10:35:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:35:20.905603197 +0000 UTC m=+6381.031632770" watchObservedRunningTime="2025-11-25 10:35:20.90710933 +0000 UTC m=+6381.033138893"
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.974282 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-57dc46b8c-d5hhg" podStartSLOduration=2.974258231 podStartE2EDuration="2.974258231s" podCreationTimestamp="2025-11-25 10:35:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:35:20.972009027 +0000 UTC m=+6381.098038590" watchObservedRunningTime="2025-11-25 10:35:20.974258231 +0000 UTC m=+6381.100287794"
Nov 25 10:35:20 crc kubenswrapper[4932]: I1125 10:35:20.997828 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-57d554f45b-2jhr6"]
Nov 25 10:35:21 crc kubenswrapper[4932]: I1125 10:35:21.006578 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-57d554f45b-2jhr6"]
Nov 25 10:35:21 crc kubenswrapper[4932]: I1125 10:35:21.068747 4932 scope.go:117] "RemoveContainer" containerID="35a72bf51474190817026bbbf10efa200410922f6010f24210a7caa9aaef85dc"
Nov 25 10:35:21 crc kubenswrapper[4932]: I1125 10:35:21.424957 4932 scope.go:117] "RemoveContainer" containerID="4b05d2c5e5a46a193fac08bfc6e67da9fbe4f6a3c24038ca987ee7c500695fb2"
Nov 25 10:35:21 crc kubenswrapper[4932]: I1125 10:35:21.823969 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-5d9c956f9d-95gps"
Nov 25 10:35:21 crc kubenswrapper[4932]: I1125 10:35:21.891277 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-79cb94f994-d24ks"]
Nov 25 10:35:21 crc kubenswrapper[4932]: I1125 10:35:21.891569 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-79cb94f994-d24ks" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerName="horizon-log" containerID="cri-o://49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0" gracePeriod=30
Nov 25 10:35:21 crc kubenswrapper[4932]: I1125 10:35:21.891849 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-79cb94f994-d24ks" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerName="horizon" containerID="cri-o://af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a" gracePeriod=30
Nov 25 10:35:21 crc kubenswrapper[4932]: I1125 10:35:21.946385 4932 scope.go:117] "RemoveContainer" containerID="b824e8eb43552ec52d15e4c37301c2636f05e831aa9c41903867b8c5ab797b60"
Nov 25 10:35:21 crc kubenswrapper[4932]: E1125 10:35:21.946839 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-557dbd6b8b-rp6cj_openstack(71ee76e8-fa49-4fe6-a13f-6d7c3c986db3)\"" pod="openstack/heat-api-557dbd6b8b-rp6cj" podUID="71ee76e8-fa49-4fe6-a13f-6d7c3c986db3"
Nov 25 10:35:21 crc kubenswrapper[4932]: I1125 10:35:21.955279 4932 scope.go:117] "RemoveContainer" containerID="a23cb581cd2d42e3db3c14da47476a97e31cd878c5f181d89e58600645de822b"
Nov 25 10:35:21 crc kubenswrapper[4932]: E1125 10:35:21.955720 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-69d5fd574b-t8kpc_openstack(33022877-bd26-4a1c-8efd-afa1d50af04e)\"" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc" podUID="33022877-bd26-4a1c-8efd-afa1d50af04e"
Nov 25 10:35:22 crc kubenswrapper[4932]: I1125 10:35:22.548557 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:22 crc kubenswrapper[4932]: I1125 10:35:22.548951 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:22 crc kubenswrapper[4932]: I1125 10:35:22.548966 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:22 crc kubenswrapper[4932]: I1125 10:35:22.548980 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:22 crc kubenswrapper[4932]: I1125 10:35:22.617768 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb7b39c9-c205-48e2-8d32-4028deddc8b5" path="/var/lib/kubelet/pods/cb7b39c9-c205-48e2-8d32-4028deddc8b5/volumes"
Nov 25 10:35:22 crc kubenswrapper[4932]: I1125 10:35:22.962799 4932 scope.go:117] "RemoveContainer" containerID="b824e8eb43552ec52d15e4c37301c2636f05e831aa9c41903867b8c5ab797b60"
Nov 25 10:35:22 crc kubenswrapper[4932]: E1125 10:35:22.963017 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-557dbd6b8b-rp6cj_openstack(71ee76e8-fa49-4fe6-a13f-6d7c3c986db3)\"" pod="openstack/heat-api-557dbd6b8b-rp6cj" podUID="71ee76e8-fa49-4fe6-a13f-6d7c3c986db3"
Nov 25 10:35:22 crc kubenswrapper[4932]: I1125 10:35:22.963073 4932 scope.go:117] "RemoveContainer" containerID="a23cb581cd2d42e3db3c14da47476a97e31cd878c5f181d89e58600645de822b"
Nov 25 10:35:22 crc kubenswrapper[4932]: E1125 10:35:22.963368 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-69d5fd574b-t8kpc_openstack(33022877-bd26-4a1c-8efd-afa1d50af04e)\"" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc" podUID="33022877-bd26-4a1c-8efd-afa1d50af04e"
Nov 25 10:35:25 crc kubenswrapper[4932]: I1125 10:35:25.993660 4932 generic.go:334] "Generic (PLEG): container finished" podID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerID="af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a" exitCode=0
Nov 25 10:35:25 crc kubenswrapper[4932]: I1125 10:35:25.993744 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79cb94f994-d24ks" event={"ID":"0133e4ac-d1bd-455d-9997-4c0d340b9ef7","Type":"ContainerDied","Data":"af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a"}
Nov 25 10:35:26 crc kubenswrapper[4932]: I1125 10:35:26.756535 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-79cb94f994-d24ks" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.128:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.128:8443: connect: connection refused"
Nov 25 10:35:29 crc kubenswrapper[4932]: I1125 10:35:29.844470 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-846f7bf768-bdlxf"
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.270275 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-7b8b7647f7-b7n2s"
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.301297 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-57dc46b8c-d5hhg"
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.346589 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-557dbd6b8b-rp6cj"]
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.388547 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-69d5fd574b-t8kpc"]
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.836593 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-557dbd6b8b-rp6cj"
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.845073 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc"
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.951081 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-combined-ca-bundle\") pod \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") "
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.951124 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-config-data-custom\") pod \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") "
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.951164 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h98zj\" (UniqueName: \"kubernetes.io/projected/33022877-bd26-4a1c-8efd-afa1d50af04e-kube-api-access-h98zj\") pod \"33022877-bd26-4a1c-8efd-afa1d50af04e\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") "
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.951369 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-config-data\") pod \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") "
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.951440 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-config-data-custom\") pod \"33022877-bd26-4a1c-8efd-afa1d50af04e\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") "
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.951475 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jk5tg\" (UniqueName: \"kubernetes.io/projected/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-kube-api-access-jk5tg\") pod \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\" (UID: \"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3\") "
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.951559 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-config-data\") pod \"33022877-bd26-4a1c-8efd-afa1d50af04e\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") "
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.951602 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-combined-ca-bundle\") pod \"33022877-bd26-4a1c-8efd-afa1d50af04e\" (UID: \"33022877-bd26-4a1c-8efd-afa1d50af04e\") "
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.957134 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" (UID: "71ee76e8-fa49-4fe6-a13f-6d7c3c986db3"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.957323 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33022877-bd26-4a1c-8efd-afa1d50af04e-kube-api-access-h98zj" (OuterVolumeSpecName: "kube-api-access-h98zj") pod "33022877-bd26-4a1c-8efd-afa1d50af04e" (UID: "33022877-bd26-4a1c-8efd-afa1d50af04e"). InnerVolumeSpecName "kube-api-access-h98zj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.960079 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "33022877-bd26-4a1c-8efd-afa1d50af04e" (UID: "33022877-bd26-4a1c-8efd-afa1d50af04e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.962181 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-kube-api-access-jk5tg" (OuterVolumeSpecName: "kube-api-access-jk5tg") pod "71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" (UID: "71ee76e8-fa49-4fe6-a13f-6d7c3c986db3"). InnerVolumeSpecName "kube-api-access-jk5tg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.982549 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33022877-bd26-4a1c-8efd-afa1d50af04e" (UID: "33022877-bd26-4a1c-8efd-afa1d50af04e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:35:30 crc kubenswrapper[4932]: I1125 10:35:30.985933 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" (UID: "71ee76e8-fa49-4fe6-a13f-6d7c3c986db3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.017814 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-config-data" (OuterVolumeSpecName: "config-data") pod "71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" (UID: "71ee76e8-fa49-4fe6-a13f-6d7c3c986db3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.025472 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-config-data" (OuterVolumeSpecName: "config-data") pod "33022877-bd26-4a1c-8efd-afa1d50af04e" (UID: "33022877-bd26-4a1c-8efd-afa1d50af04e"). InnerVolumeSpecName "config-data".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.040545 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc" event={"ID":"33022877-bd26-4a1c-8efd-afa1d50af04e","Type":"ContainerDied","Data":"083dca86548a1593a0dae23a89d61d49af384034373c307126a3bbfea8343953"} Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.040591 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-69d5fd574b-t8kpc" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.040608 4932 scope.go:117] "RemoveContainer" containerID="a23cb581cd2d42e3db3c14da47476a97e31cd878c5f181d89e58600645de822b" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.043787 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-557dbd6b8b-rp6cj" event={"ID":"71ee76e8-fa49-4fe6-a13f-6d7c3c986db3","Type":"ContainerDied","Data":"da2036357bdaaebacd373f708e804ca422e62cd3a8df17d56069c2a0fc22834c"} Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.043886 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-557dbd6b8b-rp6cj" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.054606 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.054872 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jk5tg\" (UniqueName: \"kubernetes.io/projected/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-kube-api-access-jk5tg\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.054885 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.054893 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33022877-bd26-4a1c-8efd-afa1d50af04e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.054902 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.054912 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.054919 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h98zj\" (UniqueName: \"kubernetes.io/projected/33022877-bd26-4a1c-8efd-afa1d50af04e-kube-api-access-h98zj\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.054929 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.127495 4932 scope.go:117] "RemoveContainer" 
containerID="b824e8eb43552ec52d15e4c37301c2636f05e831aa9c41903867b8c5ab797b60" Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.141984 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-69d5fd574b-t8kpc"] Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.155130 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-69d5fd574b-t8kpc"] Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.163759 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-557dbd6b8b-rp6cj"] Nov 25 10:35:31 crc kubenswrapper[4932]: I1125 10:35:31.172541 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-557dbd6b8b-rp6cj"] Nov 25 10:35:32 crc kubenswrapper[4932]: I1125 10:35:32.618540 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33022877-bd26-4a1c-8efd-afa1d50af04e" path="/var/lib/kubelet/pods/33022877-bd26-4a1c-8efd-afa1d50af04e/volumes" Nov 25 10:35:32 crc kubenswrapper[4932]: I1125 10:35:32.619742 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" path="/var/lib/kubelet/pods/71ee76e8-fa49-4fe6-a13f-6d7c3c986db3/volumes" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.118149 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2wjff"] Nov 25 10:35:34 crc kubenswrapper[4932]: E1125 10:35:34.119814 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33022877-bd26-4a1c-8efd-afa1d50af04e" containerName="heat-cfnapi" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.119837 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="33022877-bd26-4a1c-8efd-afa1d50af04e" containerName="heat-cfnapi" Nov 25 10:35:34 crc kubenswrapper[4932]: E1125 10:35:34.119845 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98767fd7-2f73-47e3-9f65-b3e08c558b6f" containerName="heat-cfnapi" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.119850 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="98767fd7-2f73-47e3-9f65-b3e08c558b6f" containerName="heat-cfnapi" Nov 25 10:35:34 crc kubenswrapper[4932]: E1125 10:35:34.119863 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb7b39c9-c205-48e2-8d32-4028deddc8b5" containerName="heat-api" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.119870 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb7b39c9-c205-48e2-8d32-4028deddc8b5" containerName="heat-api" Nov 25 10:35:34 crc kubenswrapper[4932]: E1125 10:35:34.119900 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33022877-bd26-4a1c-8efd-afa1d50af04e" containerName="heat-cfnapi" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.119908 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="33022877-bd26-4a1c-8efd-afa1d50af04e" containerName="heat-cfnapi" Nov 25 10:35:34 crc kubenswrapper[4932]: E1125 10:35:34.119921 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" containerName="heat-api" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.119929 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" containerName="heat-api" Nov 25 10:35:34 crc kubenswrapper[4932]: E1125 10:35:34.119947 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" containerName="heat-api" Nov 25 10:35:34 crc 
kubenswrapper[4932]: I1125 10:35:34.119953 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" containerName="heat-api" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.120230 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="33022877-bd26-4a1c-8efd-afa1d50af04e" containerName="heat-cfnapi" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.120251 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb7b39c9-c205-48e2-8d32-4028deddc8b5" containerName="heat-api" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.120262 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" containerName="heat-api" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.120277 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="33022877-bd26-4a1c-8efd-afa1d50af04e" containerName="heat-cfnapi" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.120286 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="98767fd7-2f73-47e3-9f65-b3e08c558b6f" containerName="heat-cfnapi" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.120781 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="71ee76e8-fa49-4fe6-a13f-6d7c3c986db3" containerName="heat-api" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.122089 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.160069 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2wjff"] Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.220953 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8c9da86-561b-42ed-a065-702f30c6d838-utilities\") pod \"certified-operators-2wjff\" (UID: \"c8c9da86-561b-42ed-a065-702f30c6d838\") " pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.221345 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm2h8\" (UniqueName: \"kubernetes.io/projected/c8c9da86-561b-42ed-a065-702f30c6d838-kube-api-access-hm2h8\") pod \"certified-operators-2wjff\" (UID: \"c8c9da86-561b-42ed-a065-702f30c6d838\") " pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.221402 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8c9da86-561b-42ed-a065-702f30c6d838-catalog-content\") pod \"certified-operators-2wjff\" (UID: \"c8c9da86-561b-42ed-a065-702f30c6d838\") " pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.323257 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8c9da86-561b-42ed-a065-702f30c6d838-utilities\") pod \"certified-operators-2wjff\" (UID: \"c8c9da86-561b-42ed-a065-702f30c6d838\") " pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.323371 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hm2h8\" (UniqueName: 
\"kubernetes.io/projected/c8c9da86-561b-42ed-a065-702f30c6d838-kube-api-access-hm2h8\") pod \"certified-operators-2wjff\" (UID: \"c8c9da86-561b-42ed-a065-702f30c6d838\") " pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.323715 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8c9da86-561b-42ed-a065-702f30c6d838-catalog-content\") pod \"certified-operators-2wjff\" (UID: \"c8c9da86-561b-42ed-a065-702f30c6d838\") " pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.323729 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8c9da86-561b-42ed-a065-702f30c6d838-utilities\") pod \"certified-operators-2wjff\" (UID: \"c8c9da86-561b-42ed-a065-702f30c6d838\") " pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.324076 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8c9da86-561b-42ed-a065-702f30c6d838-catalog-content\") pod \"certified-operators-2wjff\" (UID: \"c8c9da86-561b-42ed-a065-702f30c6d838\") " pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.347203 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm2h8\" (UniqueName: \"kubernetes.io/projected/c8c9da86-561b-42ed-a065-702f30c6d838-kube-api-access-hm2h8\") pod \"certified-operators-2wjff\" (UID: \"c8c9da86-561b-42ed-a065-702f30c6d838\") " pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:34 crc kubenswrapper[4932]: I1125 10:35:34.447276 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:35 crc kubenswrapper[4932]: I1125 10:35:35.143802 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2wjff"] Nov 25 10:35:36 crc kubenswrapper[4932]: I1125 10:35:36.103077 4932 generic.go:334] "Generic (PLEG): container finished" podID="c8c9da86-561b-42ed-a065-702f30c6d838" containerID="571f605864239ffad1725728cf152182211b3145b9c85ebff696162414bd2e1b" exitCode=0 Nov 25 10:35:36 crc kubenswrapper[4932]: I1125 10:35:36.103134 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wjff" event={"ID":"c8c9da86-561b-42ed-a065-702f30c6d838","Type":"ContainerDied","Data":"571f605864239ffad1725728cf152182211b3145b9c85ebff696162414bd2e1b"} Nov 25 10:35:36 crc kubenswrapper[4932]: I1125 10:35:36.103438 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wjff" event={"ID":"c8c9da86-561b-42ed-a065-702f30c6d838","Type":"ContainerStarted","Data":"35db57e1f75ee0df16ee2f31226b49ef9b110ef602847078f1d3c2640ede28ff"} Nov 25 10:35:36 crc kubenswrapper[4932]: I1125 10:35:36.755793 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-79cb94f994-d24ks" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.128:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.128:8443: connect: connection refused" Nov 25 10:35:37 crc kubenswrapper[4932]: I1125 10:35:37.119521 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wjff" event={"ID":"c8c9da86-561b-42ed-a065-702f30c6d838","Type":"ContainerStarted","Data":"ff96d1a1bb1f451438f8ddab3baa6c285942b0caabedae2c57501a2b72411848"} Nov 25 10:35:37 crc kubenswrapper[4932]: I1125 10:35:37.181329 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:35:37 crc kubenswrapper[4932]: I1125 10:35:37.181396 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:35:37 crc kubenswrapper[4932]: I1125 10:35:37.181443 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 10:35:37 crc kubenswrapper[4932]: I1125 10:35:37.182079 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:35:37 crc kubenswrapper[4932]: I1125 10:35:37.182138 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" 
containerID="cri-o://234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" gracePeriod=600 Nov 25 10:35:37 crc kubenswrapper[4932]: E1125 10:35:37.310950 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:35:37 crc kubenswrapper[4932]: I1125 10:35:37.582398 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-694766f497-mk7kp" Nov 25 10:35:37 crc kubenswrapper[4932]: I1125 10:35:37.628675 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-846f7bf768-bdlxf"] Nov 25 10:35:37 crc kubenswrapper[4932]: I1125 10:35:37.629256 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-846f7bf768-bdlxf" podUID="a6480479-6f69-4d9c-80a6-8b2269df40fc" containerName="heat-engine" containerID="cri-o://768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4" gracePeriod=60 Nov 25 10:35:38 crc kubenswrapper[4932]: I1125 10:35:38.136741 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" exitCode=0 Nov 25 10:35:38 crc kubenswrapper[4932]: I1125 10:35:38.136819 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"} Nov 25 10:35:38 crc kubenswrapper[4932]: I1125 10:35:38.136898 4932 scope.go:117] "RemoveContainer" containerID="d3ec26a09840ae0db21e9656db7082103181dfd412ba17c268a25c18af62eb7f" Nov 25 10:35:38 crc kubenswrapper[4932]: I1125 10:35:38.138022 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:35:38 crc kubenswrapper[4932]: E1125 10:35:38.138558 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:35:39 crc kubenswrapper[4932]: I1125 10:35:39.164770 4932 generic.go:334] "Generic (PLEG): container finished" podID="c8c9da86-561b-42ed-a065-702f30c6d838" containerID="ff96d1a1bb1f451438f8ddab3baa6c285942b0caabedae2c57501a2b72411848" exitCode=0 Nov 25 10:35:39 crc kubenswrapper[4932]: I1125 10:35:39.164865 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wjff" event={"ID":"c8c9da86-561b-42ed-a065-702f30c6d838","Type":"ContainerDied","Data":"ff96d1a1bb1f451438f8ddab3baa6c285942b0caabedae2c57501a2b72411848"} Nov 25 10:35:39 crc kubenswrapper[4932]: E1125 10:35:39.819026 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: 
, stderr: , exit code -1" containerID="768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:35:39 crc kubenswrapper[4932]: E1125 10:35:39.820579 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:35:39 crc kubenswrapper[4932]: E1125 10:35:39.822044 4932 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 25 10:35:39 crc kubenswrapper[4932]: E1125 10:35:39.822086 4932 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-846f7bf768-bdlxf" podUID="a6480479-6f69-4d9c-80a6-8b2269df40fc" containerName="heat-engine" Nov 25 10:35:40 crc kubenswrapper[4932]: I1125 10:35:40.175955 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wjff" event={"ID":"c8c9da86-561b-42ed-a065-702f30c6d838","Type":"ContainerStarted","Data":"e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029"} Nov 25 10:35:40 crc kubenswrapper[4932]: I1125 10:35:40.234225 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2wjff" podStartSLOduration=2.7264995499999998 podStartE2EDuration="6.234204191s" podCreationTimestamp="2025-11-25 10:35:34 +0000 UTC" firstStartedPulling="2025-11-25 10:35:36.104898984 +0000 UTC m=+6396.230928547" lastFinishedPulling="2025-11-25 10:35:39.612603635 +0000 UTC m=+6399.738633188" observedRunningTime="2025-11-25 10:35:40.224433651 +0000 UTC m=+6400.350463214" watchObservedRunningTime="2025-11-25 10:35:40.234204191 +0000 UTC m=+6400.360233754" Nov 25 10:35:44 crc kubenswrapper[4932]: I1125 10:35:44.448897 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:44 crc kubenswrapper[4932]: I1125 10:35:44.450517 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2wjff" Nov 25 10:35:45 crc kubenswrapper[4932]: I1125 10:35:45.517815 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-2wjff" podUID="c8c9da86-561b-42ed-a065-702f30c6d838" containerName="registry-server" probeResult="failure" output=< Nov 25 10:35:45 crc kubenswrapper[4932]: timeout: failed to connect service ":50051" within 1s Nov 25 10:35:45 crc kubenswrapper[4932]: > Nov 25 10:35:46 crc kubenswrapper[4932]: I1125 10:35:46.756589 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-79cb94f994-d24ks" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.128:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.128:8443: connect: connection refused" Nov 25 10:35:46 crc kubenswrapper[4932]: I1125 10:35:46.757006 4932 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:35:48 crc kubenswrapper[4932]: I1125 10:35:48.606137 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:35:48 crc kubenswrapper[4932]: E1125 10:35:48.606835 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.194099 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.262366 4932 generic.go:334] "Generic (PLEG): container finished" podID="a6480479-6f69-4d9c-80a6-8b2269df40fc" containerID="768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4" exitCode=0 Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.262406 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-846f7bf768-bdlxf" event={"ID":"a6480479-6f69-4d9c-80a6-8b2269df40fc","Type":"ContainerDied","Data":"768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4"} Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.262449 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-846f7bf768-bdlxf" event={"ID":"a6480479-6f69-4d9c-80a6-8b2269df40fc","Type":"ContainerDied","Data":"e8dd5adcb24804a263ac4f82e836aaae04623de8dc06eec26ef1689ada53feec"} Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.262455 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-846f7bf768-bdlxf" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.262466 4932 scope.go:117] "RemoveContainer" containerID="768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.285665 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-config-data\") pod \"a6480479-6f69-4d9c-80a6-8b2269df40fc\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.285750 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-combined-ca-bundle\") pod \"a6480479-6f69-4d9c-80a6-8b2269df40fc\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.286037 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdgkp\" (UniqueName: \"kubernetes.io/projected/a6480479-6f69-4d9c-80a6-8b2269df40fc-kube-api-access-bdgkp\") pod \"a6480479-6f69-4d9c-80a6-8b2269df40fc\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.286144 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-config-data-custom\") pod \"a6480479-6f69-4d9c-80a6-8b2269df40fc\" (UID: \"a6480479-6f69-4d9c-80a6-8b2269df40fc\") " Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.292630 4932 scope.go:117] "RemoveContainer" containerID="768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4" Nov 25 10:35:49 crc kubenswrapper[4932]: E1125 10:35:49.293646 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4\": container with ID starting with 768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4 not found: ID does not exist" containerID="768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.293907 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4"} err="failed to get container status \"768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4\": rpc error: code = NotFound desc = could not find container \"768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4\": container with ID starting with 768f45f7846a3c2830cb9bfca1baed619a79425a12602d68aaca706ad2b3c4f4 not found: ID does not exist" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.301516 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a6480479-6f69-4d9c-80a6-8b2269df40fc" (UID: "a6480479-6f69-4d9c-80a6-8b2269df40fc"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.312446 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6480479-6f69-4d9c-80a6-8b2269df40fc-kube-api-access-bdgkp" (OuterVolumeSpecName: "kube-api-access-bdgkp") pod "a6480479-6f69-4d9c-80a6-8b2269df40fc" (UID: "a6480479-6f69-4d9c-80a6-8b2269df40fc"). InnerVolumeSpecName "kube-api-access-bdgkp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.349178 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-config-data" (OuterVolumeSpecName: "config-data") pod "a6480479-6f69-4d9c-80a6-8b2269df40fc" (UID: "a6480479-6f69-4d9c-80a6-8b2269df40fc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.349337 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a6480479-6f69-4d9c-80a6-8b2269df40fc" (UID: "a6480479-6f69-4d9c-80a6-8b2269df40fc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.389090 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdgkp\" (UniqueName: \"kubernetes.io/projected/a6480479-6f69-4d9c-80a6-8b2269df40fc-kube-api-access-bdgkp\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.389139 4932 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.389148 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.389158 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6480479-6f69-4d9c-80a6-8b2269df40fc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.597711 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-846f7bf768-bdlxf"] Nov 25 10:35:49 crc kubenswrapper[4932]: I1125 10:35:49.608275 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-846f7bf768-bdlxf"] Nov 25 10:35:50 crc kubenswrapper[4932]: I1125 10:35:50.617232 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6480479-6f69-4d9c-80a6-8b2269df40fc" path="/var/lib/kubelet/pods/a6480479-6f69-4d9c-80a6-8b2269df40fc/volumes" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.289516 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.291101 4932 generic.go:334] "Generic (PLEG): container finished" podID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerID="49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0" exitCode=137 Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.291128 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79cb94f994-d24ks" event={"ID":"0133e4ac-d1bd-455d-9997-4c0d340b9ef7","Type":"ContainerDied","Data":"49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0"} Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.291149 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79cb94f994-d24ks" event={"ID":"0133e4ac-d1bd-455d-9997-4c0d340b9ef7","Type":"ContainerDied","Data":"a7d633b15a4aa20733d9fc611a2519f0f2f5b13281eb3408938bbec94806f45f"} Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.291171 4932 scope.go:117] "RemoveContainer" containerID="af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.371112 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-config-data\") pod \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.371156 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-combined-ca-bundle\") pod \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.371285 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-horizon-tls-certs\") pod \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.371313 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-horizon-secret-key\") pod \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.371350 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-logs\") pod \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.371383 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-scripts\") pod \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\" (UID: \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.371418 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zrhjp\" (UniqueName: \"kubernetes.io/projected/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-kube-api-access-zrhjp\") pod \"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\" (UID: 
\"0133e4ac-d1bd-455d-9997-4c0d340b9ef7\") " Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.373176 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-logs" (OuterVolumeSpecName: "logs") pod "0133e4ac-d1bd-455d-9997-4c0d340b9ef7" (UID: "0133e4ac-d1bd-455d-9997-4c0d340b9ef7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.377487 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-kube-api-access-zrhjp" (OuterVolumeSpecName: "kube-api-access-zrhjp") pod "0133e4ac-d1bd-455d-9997-4c0d340b9ef7" (UID: "0133e4ac-d1bd-455d-9997-4c0d340b9ef7"). InnerVolumeSpecName "kube-api-access-zrhjp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.378024 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "0133e4ac-d1bd-455d-9997-4c0d340b9ef7" (UID: "0133e4ac-d1bd-455d-9997-4c0d340b9ef7"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.398959 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-config-data" (OuterVolumeSpecName: "config-data") pod "0133e4ac-d1bd-455d-9997-4c0d340b9ef7" (UID: "0133e4ac-d1bd-455d-9997-4c0d340b9ef7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.405411 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-scripts" (OuterVolumeSpecName: "scripts") pod "0133e4ac-d1bd-455d-9997-4c0d340b9ef7" (UID: "0133e4ac-d1bd-455d-9997-4c0d340b9ef7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.408080 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0133e4ac-d1bd-455d-9997-4c0d340b9ef7" (UID: "0133e4ac-d1bd-455d-9997-4c0d340b9ef7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.438686 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "0133e4ac-d1bd-455d-9997-4c0d340b9ef7" (UID: "0133e4ac-d1bd-455d-9997-4c0d340b9ef7"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.469362 4932 scope.go:117] "RemoveContainer" containerID="49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.473868 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.473909 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.474087 4932 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.474109 4932 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.474120 4932 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-logs\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.474131 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.474169 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zrhjp\" (UniqueName: \"kubernetes.io/projected/0133e4ac-d1bd-455d-9997-4c0d340b9ef7-kube-api-access-zrhjp\") on node \"crc\" DevicePath \"\"" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.517324 4932 scope.go:117] "RemoveContainer" containerID="af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a" Nov 25 10:35:52 crc kubenswrapper[4932]: E1125 10:35:52.517800 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a\": container with ID starting with af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a not found: ID does not exist" containerID="af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.517921 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a"} err="failed to get container status \"af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a\": rpc error: code = NotFound desc = could not find container \"af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a\": container with ID starting with af4116c615d93694b1488526ea6de07ace79737b815cb4347bd34ee2397c587a not found: ID does not exist" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.518242 4932 scope.go:117] "RemoveContainer" containerID="49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0" Nov 25 10:35:52 crc kubenswrapper[4932]: 
E1125 10:35:52.518896 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0\": container with ID starting with 49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0 not found: ID does not exist" containerID="49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0" Nov 25 10:35:52 crc kubenswrapper[4932]: I1125 10:35:52.518985 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0"} err="failed to get container status \"49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0\": rpc error: code = NotFound desc = could not find container \"49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0\": container with ID starting with 49de02719b50a57d47c35a698f41f53978798e24cfa8db946e1f5b5e7d0f96f0 not found: ID does not exist" Nov 25 10:35:53 crc kubenswrapper[4932]: I1125 10:35:53.303542 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-79cb94f994-d24ks" Nov 25 10:35:53 crc kubenswrapper[4932]: I1125 10:35:53.333451 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-79cb94f994-d24ks"] Nov 25 10:35:53 crc kubenswrapper[4932]: I1125 10:35:53.342425 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-79cb94f994-d24ks"] Nov 25 10:35:54 crc kubenswrapper[4932]: I1125 10:35:54.040034 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-t2vdr"] Nov 25 10:35:54 crc kubenswrapper[4932]: I1125 10:35:54.048451 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-627c-account-create-jm7w5"] Nov 25 10:35:54 crc kubenswrapper[4932]: I1125 10:35:54.056761 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-t2vdr"] Nov 25 10:35:54 crc kubenswrapper[4932]: I1125 10:35:54.065169 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-627c-account-create-jm7w5"] Nov 25 10:35:54 crc kubenswrapper[4932]: I1125 10:35:54.616318 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" path="/var/lib/kubelet/pods/0133e4ac-d1bd-455d-9997-4c0d340b9ef7/volumes" Nov 25 10:35:54 crc kubenswrapper[4932]: I1125 10:35:54.616976 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="291edd7f-043c-45bc-9229-98c45b151377" path="/var/lib/kubelet/pods/291edd7f-043c-45bc-9229-98c45b151377/volumes" Nov 25 10:35:54 crc kubenswrapper[4932]: I1125 10:35:54.617702 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f9473ff-2e53-426d-8c1a-3da28f58404b" path="/var/lib/kubelet/pods/6f9473ff-2e53-426d-8c1a-3da28f58404b/volumes" Nov 25 10:35:55 crc kubenswrapper[4932]: I1125 10:35:55.498847 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-2wjff" podUID="c8c9da86-561b-42ed-a065-702f30c6d838" containerName="registry-server" probeResult="failure" output=< Nov 25 10:35:55 crc kubenswrapper[4932]: timeout: failed to connect service ":50051" within 1s Nov 25 10:35:55 crc kubenswrapper[4932]: > Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.695429 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"] 
Nov 25 10:35:57 crc kubenswrapper[4932]: E1125 10:35:57.696759 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerName="horizon"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.696773 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerName="horizon"
Nov 25 10:35:57 crc kubenswrapper[4932]: E1125 10:35:57.696806 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerName="horizon-log"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.696812 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerName="horizon-log"
Nov 25 10:35:57 crc kubenswrapper[4932]: E1125 10:35:57.696819 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6480479-6f69-4d9c-80a6-8b2269df40fc" containerName="heat-engine"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.696825 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6480479-6f69-4d9c-80a6-8b2269df40fc" containerName="heat-engine"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.697019 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerName="horizon-log"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.697040 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6480479-6f69-4d9c-80a6-8b2269df40fc" containerName="heat-engine"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.697058 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="0133e4ac-d1bd-455d-9997-4c0d340b9ef7" containerName="horizon"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.698546 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.704076 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.722602 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"]
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.790474 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/54169020-b05c-4f3c-8b3b-de6b94a73e23-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228\" (UID: \"54169020-b05c-4f3c-8b3b-de6b94a73e23\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.790528 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pk7qv\" (UniqueName: \"kubernetes.io/projected/54169020-b05c-4f3c-8b3b-de6b94a73e23-kube-api-access-pk7qv\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228\" (UID: \"54169020-b05c-4f3c-8b3b-de6b94a73e23\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.790782 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/54169020-b05c-4f3c-8b3b-de6b94a73e23-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228\" (UID: \"54169020-b05c-4f3c-8b3b-de6b94a73e23\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.892650 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/54169020-b05c-4f3c-8b3b-de6b94a73e23-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228\" (UID: \"54169020-b05c-4f3c-8b3b-de6b94a73e23\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.892788 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/54169020-b05c-4f3c-8b3b-de6b94a73e23-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228\" (UID: \"54169020-b05c-4f3c-8b3b-de6b94a73e23\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.892830 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pk7qv\" (UniqueName: \"kubernetes.io/projected/54169020-b05c-4f3c-8b3b-de6b94a73e23-kube-api-access-pk7qv\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228\" (UID: \"54169020-b05c-4f3c-8b3b-de6b94a73e23\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.893154 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/54169020-b05c-4f3c-8b3b-de6b94a73e23-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228\" (UID: \"54169020-b05c-4f3c-8b3b-de6b94a73e23\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.893307 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/54169020-b05c-4f3c-8b3b-de6b94a73e23-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228\" (UID: \"54169020-b05c-4f3c-8b3b-de6b94a73e23\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:35:57 crc kubenswrapper[4932]: I1125 10:35:57.911863 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pk7qv\" (UniqueName: \"kubernetes.io/projected/54169020-b05c-4f3c-8b3b-de6b94a73e23-kube-api-access-pk7qv\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228\" (UID: \"54169020-b05c-4f3c-8b3b-de6b94a73e23\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:35:58 crc kubenswrapper[4932]: I1125 10:35:58.030441 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:35:58 crc kubenswrapper[4932]: I1125 10:35:58.485507 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"]
Nov 25 10:35:59 crc kubenswrapper[4932]: I1125 10:35:59.353271 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228" event={"ID":"54169020-b05c-4f3c-8b3b-de6b94a73e23","Type":"ContainerStarted","Data":"a9c4ab44bf77ad496f7e18ad398a49273318b02966d5c0f9c524bba5c02f63a7"}
Nov 25 10:35:59 crc kubenswrapper[4932]: I1125 10:35:59.353316 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228" event={"ID":"54169020-b05c-4f3c-8b3b-de6b94a73e23","Type":"ContainerStarted","Data":"91a5ed664d8750e558fba52047abe704f69ba405eb092b8f7711a4a61b611b15"}
Nov 25 10:35:59 crc kubenswrapper[4932]: I1125 10:35:59.606593 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"
Nov 25 10:35:59 crc kubenswrapper[4932]: E1125 10:35:59.607635 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:36:00 crc kubenswrapper[4932]: I1125 10:36:00.364264 4932 generic.go:334] "Generic (PLEG): container finished" podID="54169020-b05c-4f3c-8b3b-de6b94a73e23" containerID="a9c4ab44bf77ad496f7e18ad398a49273318b02966d5c0f9c524bba5c02f63a7" exitCode=0
Nov 25 10:36:00 crc kubenswrapper[4932]: I1125 10:36:00.364321 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228" event={"ID":"54169020-b05c-4f3c-8b3b-de6b94a73e23","Type":"ContainerDied","Data":"a9c4ab44bf77ad496f7e18ad398a49273318b02966d5c0f9c524bba5c02f63a7"}
Nov 25 10:36:02 crc kubenswrapper[4932]: I1125 10:36:02.382457 4932 generic.go:334] "Generic (PLEG): container finished" podID="54169020-b05c-4f3c-8b3b-de6b94a73e23" containerID="2d244078967316a77f05186c2aee39b6692177ff248dcba6b18056dad98dd60b" exitCode=0
Nov 25 10:36:02 crc kubenswrapper[4932]: I1125 10:36:02.382559 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228" event={"ID":"54169020-b05c-4f3c-8b3b-de6b94a73e23","Type":"ContainerDied","Data":"2d244078967316a77f05186c2aee39b6692177ff248dcba6b18056dad98dd60b"}
Nov 25 10:36:03 crc kubenswrapper[4932]: I1125 10:36:03.395702 4932 generic.go:334] "Generic (PLEG): container finished" podID="54169020-b05c-4f3c-8b3b-de6b94a73e23" containerID="67b77a2403c4ec579a23829d3a6946541d03d8c2bf5674b84a54f9b4c6e1451c" exitCode=0
Nov 25 10:36:03 crc kubenswrapper[4932]: I1125 10:36:03.395752 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228" event={"ID":"54169020-b05c-4f3c-8b3b-de6b94a73e23","Type":"ContainerDied","Data":"67b77a2403c4ec579a23829d3a6946541d03d8c2bf5674b84a54f9b4c6e1451c"}
Nov 25 10:36:04 crc kubenswrapper[4932]: I1125 10:36:04.507998 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2wjff"
Nov 25 10:36:04 crc kubenswrapper[4932]: I1125 10:36:04.565753 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2wjff"
Nov 25 10:36:04 crc kubenswrapper[4932]: I1125 10:36:04.760532 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:36:04 crc kubenswrapper[4932]: I1125 10:36:04.830951 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pk7qv\" (UniqueName: \"kubernetes.io/projected/54169020-b05c-4f3c-8b3b-de6b94a73e23-kube-api-access-pk7qv\") pod \"54169020-b05c-4f3c-8b3b-de6b94a73e23\" (UID: \"54169020-b05c-4f3c-8b3b-de6b94a73e23\") "
Nov 25 10:36:04 crc kubenswrapper[4932]: I1125 10:36:04.831977 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/54169020-b05c-4f3c-8b3b-de6b94a73e23-bundle\") pod \"54169020-b05c-4f3c-8b3b-de6b94a73e23\" (UID: \"54169020-b05c-4f3c-8b3b-de6b94a73e23\") "
Nov 25 10:36:04 crc kubenswrapper[4932]: I1125 10:36:04.832312 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/54169020-b05c-4f3c-8b3b-de6b94a73e23-util\") pod \"54169020-b05c-4f3c-8b3b-de6b94a73e23\" (UID: \"54169020-b05c-4f3c-8b3b-de6b94a73e23\") "
Nov 25 10:36:04 crc kubenswrapper[4932]: I1125 10:36:04.834056 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54169020-b05c-4f3c-8b3b-de6b94a73e23-bundle" (OuterVolumeSpecName: "bundle") pod "54169020-b05c-4f3c-8b3b-de6b94a73e23" (UID: "54169020-b05c-4f3c-8b3b-de6b94a73e23"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:36:04 crc kubenswrapper[4932]: I1125 10:36:04.841762 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/54169020-b05c-4f3c-8b3b-de6b94a73e23-kube-api-access-pk7qv" (OuterVolumeSpecName: "kube-api-access-pk7qv") pod "54169020-b05c-4f3c-8b3b-de6b94a73e23" (UID: "54169020-b05c-4f3c-8b3b-de6b94a73e23"). InnerVolumeSpecName "kube-api-access-pk7qv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:36:04 crc kubenswrapper[4932]: I1125 10:36:04.843094 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/54169020-b05c-4f3c-8b3b-de6b94a73e23-util" (OuterVolumeSpecName: "util") pod "54169020-b05c-4f3c-8b3b-de6b94a73e23" (UID: "54169020-b05c-4f3c-8b3b-de6b94a73e23"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:36:04 crc kubenswrapper[4932]: I1125 10:36:04.934926 4932 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/54169020-b05c-4f3c-8b3b-de6b94a73e23-util\") on node \"crc\" DevicePath \"\""
Nov 25 10:36:04 crc kubenswrapper[4932]: I1125 10:36:04.934970 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pk7qv\" (UniqueName: \"kubernetes.io/projected/54169020-b05c-4f3c-8b3b-de6b94a73e23-kube-api-access-pk7qv\") on node \"crc\" DevicePath \"\""
Nov 25 10:36:04 crc kubenswrapper[4932]: I1125 10:36:04.934990 4932 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/54169020-b05c-4f3c-8b3b-de6b94a73e23-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:36:05 crc kubenswrapper[4932]: I1125 10:36:05.420009 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228" event={"ID":"54169020-b05c-4f3c-8b3b-de6b94a73e23","Type":"ContainerDied","Data":"91a5ed664d8750e558fba52047abe704f69ba405eb092b8f7711a4a61b611b15"}
Nov 25 10:36:05 crc kubenswrapper[4932]: I1125 10:36:05.420295 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91a5ed664d8750e558fba52047abe704f69ba405eb092b8f7711a4a61b611b15"
Nov 25 10:36:05 crc kubenswrapper[4932]: I1125 10:36:05.420055 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210q4228"
Nov 25 10:36:05 crc kubenswrapper[4932]: I1125 10:36:05.772574 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2wjff"]
Nov 25 10:36:06 crc kubenswrapper[4932]: I1125 10:36:06.035161 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-grmcg"]
Nov 25 10:36:06 crc kubenswrapper[4932]: I1125 10:36:06.047231 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-grmcg"]
Nov 25 10:36:06 crc kubenswrapper[4932]: I1125 10:36:06.428483 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2wjff" podUID="c8c9da86-561b-42ed-a065-702f30c6d838" containerName="registry-server" containerID="cri-o://e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029" gracePeriod=2
Nov 25 10:36:06 crc kubenswrapper[4932]: I1125 10:36:06.618440 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0fbeab9-89fc-48ff-87f4-eacc07a5bc80" path="/var/lib/kubelet/pods/c0fbeab9-89fc-48ff-87f4-eacc07a5bc80/volumes"
Nov 25 10:36:06 crc kubenswrapper[4932]: I1125 10:36:06.876727 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2wjff"
Nov 25 10:36:06 crc kubenswrapper[4932]: I1125 10:36:06.976467 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8c9da86-561b-42ed-a065-702f30c6d838-utilities\") pod \"c8c9da86-561b-42ed-a065-702f30c6d838\" (UID: \"c8c9da86-561b-42ed-a065-702f30c6d838\") "
Nov 25 10:36:06 crc kubenswrapper[4932]: I1125 10:36:06.977216 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hm2h8\" (UniqueName: \"kubernetes.io/projected/c8c9da86-561b-42ed-a065-702f30c6d838-kube-api-access-hm2h8\") pod \"c8c9da86-561b-42ed-a065-702f30c6d838\" (UID: \"c8c9da86-561b-42ed-a065-702f30c6d838\") "
Nov 25 10:36:06 crc kubenswrapper[4932]: I1125 10:36:06.977317 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8c9da86-561b-42ed-a065-702f30c6d838-catalog-content\") pod \"c8c9da86-561b-42ed-a065-702f30c6d838\" (UID: \"c8c9da86-561b-42ed-a065-702f30c6d838\") "
Nov 25 10:36:06 crc kubenswrapper[4932]: I1125 10:36:06.977524 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8c9da86-561b-42ed-a065-702f30c6d838-utilities" (OuterVolumeSpecName: "utilities") pod "c8c9da86-561b-42ed-a065-702f30c6d838" (UID: "c8c9da86-561b-42ed-a065-702f30c6d838"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:36:06 crc kubenswrapper[4932]: I1125 10:36:06.978361 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8c9da86-561b-42ed-a065-702f30c6d838-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 10:36:06 crc kubenswrapper[4932]: I1125 10:36:06.983877 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8c9da86-561b-42ed-a065-702f30c6d838-kube-api-access-hm2h8" (OuterVolumeSpecName: "kube-api-access-hm2h8") pod "c8c9da86-561b-42ed-a065-702f30c6d838" (UID: "c8c9da86-561b-42ed-a065-702f30c6d838"). InnerVolumeSpecName "kube-api-access-hm2h8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.023967 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8c9da86-561b-42ed-a065-702f30c6d838-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c8c9da86-561b-42ed-a065-702f30c6d838" (UID: "c8c9da86-561b-42ed-a065-702f30c6d838"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.080050 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hm2h8\" (UniqueName: \"kubernetes.io/projected/c8c9da86-561b-42ed-a065-702f30c6d838-kube-api-access-hm2h8\") on node \"crc\" DevicePath \"\""
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.080098 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8c9da86-561b-42ed-a065-702f30c6d838-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.444434 4932 generic.go:334] "Generic (PLEG): container finished" podID="c8c9da86-561b-42ed-a065-702f30c6d838" containerID="e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029" exitCode=0
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.444513 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2wjff"
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.444519 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wjff" event={"ID":"c8c9da86-561b-42ed-a065-702f30c6d838","Type":"ContainerDied","Data":"e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029"}
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.445262 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2wjff" event={"ID":"c8c9da86-561b-42ed-a065-702f30c6d838","Type":"ContainerDied","Data":"35db57e1f75ee0df16ee2f31226b49ef9b110ef602847078f1d3c2640ede28ff"}
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.445311 4932 scope.go:117] "RemoveContainer" containerID="e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029"
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.479117 4932 scope.go:117] "RemoveContainer" containerID="ff96d1a1bb1f451438f8ddab3baa6c285942b0caabedae2c57501a2b72411848"
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.485250 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2wjff"]
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.494019 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2wjff"]
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.505847 4932 scope.go:117] "RemoveContainer" containerID="571f605864239ffad1725728cf152182211b3145b9c85ebff696162414bd2e1b"
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.573901 4932 scope.go:117] "RemoveContainer" containerID="e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029"
Nov 25 10:36:07 crc kubenswrapper[4932]: E1125 10:36:07.574475 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029\": container with ID starting with e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029 not found: ID does not exist" containerID="e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029"
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.574528 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029"} err="failed to get container status \"e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029\": rpc error: code = NotFound desc = could not find container \"e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029\": container with ID starting with e3f040aa2fb2693da7b3ee16ffdcf712e43378278fc7ea9ee1b95ffdcfc87029 not found: ID does not exist"
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.574562 4932 scope.go:117] "RemoveContainer" containerID="ff96d1a1bb1f451438f8ddab3baa6c285942b0caabedae2c57501a2b72411848"
Nov 25 10:36:07 crc kubenswrapper[4932]: E1125 10:36:07.575042 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ff96d1a1bb1f451438f8ddab3baa6c285942b0caabedae2c57501a2b72411848\": container with ID starting with ff96d1a1bb1f451438f8ddab3baa6c285942b0caabedae2c57501a2b72411848 not found: ID does not exist" containerID="ff96d1a1bb1f451438f8ddab3baa6c285942b0caabedae2c57501a2b72411848"
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.575075 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ff96d1a1bb1f451438f8ddab3baa6c285942b0caabedae2c57501a2b72411848"} err="failed to get container status \"ff96d1a1bb1f451438f8ddab3baa6c285942b0caabedae2c57501a2b72411848\": rpc error: code = NotFound desc = could not find container \"ff96d1a1bb1f451438f8ddab3baa6c285942b0caabedae2c57501a2b72411848\": container with ID starting with ff96d1a1bb1f451438f8ddab3baa6c285942b0caabedae2c57501a2b72411848 not found: ID does not exist"
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.575093 4932 scope.go:117] "RemoveContainer" containerID="571f605864239ffad1725728cf152182211b3145b9c85ebff696162414bd2e1b"
Nov 25 10:36:07 crc kubenswrapper[4932]: E1125 10:36:07.575583 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"571f605864239ffad1725728cf152182211b3145b9c85ebff696162414bd2e1b\": container with ID starting with 571f605864239ffad1725728cf152182211b3145b9c85ebff696162414bd2e1b not found: ID does not exist" containerID="571f605864239ffad1725728cf152182211b3145b9c85ebff696162414bd2e1b"
Nov 25 10:36:07 crc kubenswrapper[4932]: I1125 10:36:07.575611 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"571f605864239ffad1725728cf152182211b3145b9c85ebff696162414bd2e1b"} err="failed to get container status \"571f605864239ffad1725728cf152182211b3145b9c85ebff696162414bd2e1b\": rpc error: code = NotFound desc = could not find container \"571f605864239ffad1725728cf152182211b3145b9c85ebff696162414bd2e1b\": container with ID starting with 571f605864239ffad1725728cf152182211b3145b9c85ebff696162414bd2e1b not found: ID does not exist"
Nov 25 10:36:08 crc kubenswrapper[4932]: I1125 10:36:08.622130 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8c9da86-561b-42ed-a065-702f30c6d838" path="/var/lib/kubelet/pods/c8c9da86-561b-42ed-a065-702f30c6d838/volumes"
Nov 25 10:36:13 crc kubenswrapper[4932]: I1125 10:36:13.607256 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"
Nov 25 10:36:13 crc kubenswrapper[4932]: E1125 10:36:13.608319 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.473641 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-5w6sw"]
Nov 25 10:36:15 crc kubenswrapper[4932]: E1125 10:36:15.474584 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8c9da86-561b-42ed-a065-702f30c6d838" containerName="registry-server"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.474616 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8c9da86-561b-42ed-a065-702f30c6d838" containerName="registry-server"
Nov 25 10:36:15 crc kubenswrapper[4932]: E1125 10:36:15.474645 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54169020-b05c-4f3c-8b3b-de6b94a73e23" containerName="pull"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.474652 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="54169020-b05c-4f3c-8b3b-de6b94a73e23" containerName="pull"
Nov 25 10:36:15 crc kubenswrapper[4932]: E1125 10:36:15.474666 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8c9da86-561b-42ed-a065-702f30c6d838" containerName="extract-utilities"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.474672 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8c9da86-561b-42ed-a065-702f30c6d838" containerName="extract-utilities"
Nov 25 10:36:15 crc kubenswrapper[4932]: E1125 10:36:15.474690 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8c9da86-561b-42ed-a065-702f30c6d838" containerName="extract-content"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.474697 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8c9da86-561b-42ed-a065-702f30c6d838" containerName="extract-content"
Nov 25 10:36:15 crc kubenswrapper[4932]: E1125 10:36:15.474728 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54169020-b05c-4f3c-8b3b-de6b94a73e23" containerName="extract"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.474734 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="54169020-b05c-4f3c-8b3b-de6b94a73e23" containerName="extract"
Nov 25 10:36:15 crc kubenswrapper[4932]: E1125 10:36:15.474751 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="54169020-b05c-4f3c-8b3b-de6b94a73e23" containerName="util"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.474758 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="54169020-b05c-4f3c-8b3b-de6b94a73e23" containerName="util"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.474969 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8c9da86-561b-42ed-a065-702f30c6d838" containerName="registry-server"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.474979 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="54169020-b05c-4f3c-8b3b-de6b94a73e23" containerName="extract"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.475708 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-5w6sw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.479412 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.479680 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-tqwz9"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.479859 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.500376 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-5w6sw"]
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.548444 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfzvp\" (UniqueName: \"kubernetes.io/projected/36404f02-6fc9-4441-b12d-74416c1b04a5-kube-api-access-qfzvp\") pod \"obo-prometheus-operator-668cf9dfbb-5w6sw\" (UID: \"36404f02-6fc9-4441-b12d-74416c1b04a5\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-5w6sw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.591687 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42"]
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.593561 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.597749 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.598636 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-lrcw4"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.608386 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw"]
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.609800 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.621276 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42"]
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.656233 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw"]
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.670517 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bfbaeef9-cd3d-403c-ba71-5516712fea78-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-55cff87c94-dtl42\" (UID: \"bfbaeef9-cd3d-403c-ba71-5516712fea78\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.670772 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bfbaeef9-cd3d-403c-ba71-5516712fea78-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-55cff87c94-dtl42\" (UID: \"bfbaeef9-cd3d-403c-ba71-5516712fea78\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.671008 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfzvp\" (UniqueName: \"kubernetes.io/projected/36404f02-6fc9-4441-b12d-74416c1b04a5-kube-api-access-qfzvp\") pod \"obo-prometheus-operator-668cf9dfbb-5w6sw\" (UID: \"36404f02-6fc9-4441-b12d-74416c1b04a5\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-5w6sw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.720145 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfzvp\" (UniqueName: \"kubernetes.io/projected/36404f02-6fc9-4441-b12d-74416c1b04a5-kube-api-access-qfzvp\") pod \"obo-prometheus-operator-668cf9dfbb-5w6sw\" (UID: \"36404f02-6fc9-4441-b12d-74416c1b04a5\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-5w6sw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.776147 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-55cff87c94-6tknw\" (UID: \"bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.776310 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bfbaeef9-cd3d-403c-ba71-5516712fea78-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-55cff87c94-dtl42\" (UID: \"bfbaeef9-cd3d-403c-ba71-5516712fea78\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.776376 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bfbaeef9-cd3d-403c-ba71-5516712fea78-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-55cff87c94-dtl42\" (UID: \"bfbaeef9-cd3d-403c-ba71-5516712fea78\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.776411 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-55cff87c94-6tknw\" (UID: \"bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.782048 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bfbaeef9-cd3d-403c-ba71-5516712fea78-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-55cff87c94-dtl42\" (UID: \"bfbaeef9-cd3d-403c-ba71-5516712fea78\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.789932 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-ng7j7"]
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.792416 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.795585 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-fp9vn"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.795928 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.797800 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bfbaeef9-cd3d-403c-ba71-5516712fea78-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-55cff87c94-dtl42\" (UID: \"bfbaeef9-cd3d-403c-ba71-5516712fea78\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.798315 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-5w6sw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.826584 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-ng7j7"]
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.834835 4932 scope.go:117] "RemoveContainer" containerID="247f6ee0835a7a4048979f720478ab7c1fb9af6965c1c9885c79bd30669dc82f"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.879015 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-55cff87c94-6tknw\" (UID: \"bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.879426 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/4c23ed9c-a51f-4643-96f4-72e6739729c5-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-ng7j7\" (UID: \"4c23ed9c-a51f-4643-96f4-72e6739729c5\") " pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.879457 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-55cff87c94-6tknw\" (UID: \"bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.879546 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vldl\" (UniqueName: \"kubernetes.io/projected/4c23ed9c-a51f-4643-96f4-72e6739729c5-kube-api-access-7vldl\") pod \"observability-operator-d8bb48f5d-ng7j7\" (UID: \"4c23ed9c-a51f-4643-96f4-72e6739729c5\") " pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.884690 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-55cff87c94-6tknw\" (UID: \"bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.889252 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-55cff87c94-6tknw\" (UID: \"bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.913899 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.932585 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.981042 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vldl\" (UniqueName: \"kubernetes.io/projected/4c23ed9c-a51f-4643-96f4-72e6739729c5-kube-api-access-7vldl\") pod \"observability-operator-d8bb48f5d-ng7j7\" (UID: \"4c23ed9c-a51f-4643-96f4-72e6739729c5\") " pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.981182 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/4c23ed9c-a51f-4643-96f4-72e6739729c5-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-ng7j7\" (UID: \"4c23ed9c-a51f-4643-96f4-72e6739729c5\") " pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7"
Nov 25 10:36:15 crc kubenswrapper[4932]: I1125 10:36:15.986969 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/4c23ed9c-a51f-4643-96f4-72e6739729c5-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-ng7j7\" (UID: \"4c23ed9c-a51f-4643-96f4-72e6739729c5\") " pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.009672 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vldl\" (UniqueName: \"kubernetes.io/projected/4c23ed9c-a51f-4643-96f4-72e6739729c5-kube-api-access-7vldl\") pod \"observability-operator-d8bb48f5d-ng7j7\" (UID: \"4c23ed9c-a51f-4643-96f4-72e6739729c5\") " pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.039391 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-4mcgt"]
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.041065 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-4mcgt"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.044225 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-nnbrb"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.056264 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-4mcgt"]
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.057538 4932 scope.go:117] "RemoveContainer" containerID="f40f62600dee6687ec7594a169524917865615d86973705a738a022f0b1f0db4"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.125779 4932 scope.go:117] "RemoveContainer" containerID="196f0667ad5b62d36da5d4e06507c6a14b093f7813c8d0fc6e9ef30a6a958af2"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.192776 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jwxt\" (UniqueName: \"kubernetes.io/projected/76dd4880-b484-46cb-8bdc-51d5d32a998e-kube-api-access-9jwxt\") pod \"perses-operator-5446b9c989-4mcgt\" (UID: \"76dd4880-b484-46cb-8bdc-51d5d32a998e\") " pod="openshift-operators/perses-operator-5446b9c989-4mcgt"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.192965 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/76dd4880-b484-46cb-8bdc-51d5d32a998e-openshift-service-ca\") pod \"perses-operator-5446b9c989-4mcgt\" (UID: \"76dd4880-b484-46cb-8bdc-51d5d32a998e\") " pod="openshift-operators/perses-operator-5446b9c989-4mcgt"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.295651 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/76dd4880-b484-46cb-8bdc-51d5d32a998e-openshift-service-ca\") pod \"perses-operator-5446b9c989-4mcgt\" (UID: \"76dd4880-b484-46cb-8bdc-51d5d32a998e\") " pod="openshift-operators/perses-operator-5446b9c989-4mcgt"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.295718 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jwxt\" (UniqueName: \"kubernetes.io/projected/76dd4880-b484-46cb-8bdc-51d5d32a998e-kube-api-access-9jwxt\") pod \"perses-operator-5446b9c989-4mcgt\" (UID: \"76dd4880-b484-46cb-8bdc-51d5d32a998e\") " pod="openshift-operators/perses-operator-5446b9c989-4mcgt"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.297778 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/76dd4880-b484-46cb-8bdc-51d5d32a998e-openshift-service-ca\") pod \"perses-operator-5446b9c989-4mcgt\" (UID: \"76dd4880-b484-46cb-8bdc-51d5d32a998e\") " pod="openshift-operators/perses-operator-5446b9c989-4mcgt"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.307215 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.312609 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jwxt\" (UniqueName: \"kubernetes.io/projected/76dd4880-b484-46cb-8bdc-51d5d32a998e-kube-api-access-9jwxt\") pod \"perses-operator-5446b9c989-4mcgt\" (UID: \"76dd4880-b484-46cb-8bdc-51d5d32a998e\") " pod="openshift-operators/perses-operator-5446b9c989-4mcgt"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.369967 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-4mcgt"
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.563069 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-5w6sw"]
Nov 25 10:36:16 crc kubenswrapper[4932]: W1125 10:36:16.564665 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod36404f02_6fc9_4441_b12d_74416c1b04a5.slice/crio-f9a648a1d3f68329885a59cda569025fceae86f6dd300bc40861b612df1ea314 WatchSource:0}: Error finding container f9a648a1d3f68329885a59cda569025fceae86f6dd300bc40861b612df1ea314: Status 404 returned error can't find the container with id f9a648a1d3f68329885a59cda569025fceae86f6dd300bc40861b612df1ea314
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.576771 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42"]
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.717659 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw"]
Nov 25 10:36:16 crc kubenswrapper[4932]: W1125 10:36:16.741541 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd0fcf73_1ad8_47e2_8131_1b6c5ac8bd01.slice/crio-b1f296fbb44f1083a3f1c76f71c3022ea45ff394e6cb4fe6bfaae4ef88de4287 WatchSource:0}: Error finding container b1f296fbb44f1083a3f1c76f71c3022ea45ff394e6cb4fe6bfaae4ef88de4287: Status 404 returned error can't find the container with id b1f296fbb44f1083a3f1c76f71c3022ea45ff394e6cb4fe6bfaae4ef88de4287
Nov 25 10:36:16 crc kubenswrapper[4932]: I1125 10:36:16.872408 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-ng7j7"]
Nov 25 10:36:17 crc kubenswrapper[4932]: I1125 10:36:17.046108 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-4mcgt"]
Nov 25 10:36:17 crc kubenswrapper[4932]: W1125 10:36:17.057925 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod76dd4880_b484_46cb_8bdc_51d5d32a998e.slice/crio-ceb50125e32f4c1e01c6e07d081e445a56e1e8ac3c6594b604ce6bbe8993cece WatchSource:0}: Error finding container ceb50125e32f4c1e01c6e07d081e445a56e1e8ac3c6594b604ce6bbe8993cece: Status 404 returned error can't find the container with id ceb50125e32f4c1e01c6e07d081e445a56e1e8ac3c6594b604ce6bbe8993cece
Nov 25 10:36:17 crc kubenswrapper[4932]: I1125 10:36:17.545051 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7" event={"ID":"4c23ed9c-a51f-4643-96f4-72e6739729c5","Type":"ContainerStarted","Data":"141f6da8468242acc9317637091a7b51be745f7c4a7f61f7cdd6ad5ec1bf53a3"}
Nov 25 10:36:17 crc kubenswrapper[4932]: I1125 10:36:17.547115 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-4mcgt" event={"ID":"76dd4880-b484-46cb-8bdc-51d5d32a998e","Type":"ContainerStarted","Data":"ceb50125e32f4c1e01c6e07d081e445a56e1e8ac3c6594b604ce6bbe8993cece"}
Nov 25 10:36:17 crc kubenswrapper[4932]: I1125 10:36:17.549301 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw" event={"ID":"bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01","Type":"ContainerStarted","Data":"b1f296fbb44f1083a3f1c76f71c3022ea45ff394e6cb4fe6bfaae4ef88de4287"}
Nov 25 10:36:17 crc kubenswrapper[4932]: I1125 10:36:17.557013 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-5w6sw" event={"ID":"36404f02-6fc9-4441-b12d-74416c1b04a5","Type":"ContainerStarted","Data":"f9a648a1d3f68329885a59cda569025fceae86f6dd300bc40861b612df1ea314"}
Nov 25 10:36:17 crc kubenswrapper[4932]: I1125 10:36:17.572245 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42" event={"ID":"bfbaeef9-cd3d-403c-ba71-5516712fea78","Type":"ContainerStarted","Data":"b5ef03a235ab7d9ec6192436d3679ca5aeaa3319e7c0dfb7c85f639c8e2020c1"}
Nov 25 10:36:25 crc kubenswrapper[4932]: I1125 10:36:25.605496 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"
Nov 25 10:36:25 crc kubenswrapper[4932]: E1125 10:36:25.606110 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.704444 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-4mcgt" event={"ID":"76dd4880-b484-46cb-8bdc-51d5d32a998e","Type":"ContainerStarted","Data":"492e3333ebf5a21b92f67a99bfbde0cbb25669d72aaec340422259eec8e01ab9"}
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.704827 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-4mcgt"
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.709546 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw" event={"ID":"bd0fcf73-1ad8-47e2-8131-1b6c5ac8bd01","Type":"ContainerStarted","Data":"13201f217ed69ccff0485d46e8486e4bb0bcec0b66d70049bfb485cef8e98963"}
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.714433 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-5w6sw" event={"ID":"36404f02-6fc9-4441-b12d-74416c1b04a5","Type":"ContainerStarted","Data":"c02e3f9bb131f40bb0ce757962d279cab9b327dda340c2aa75e9a91b07ce693a"}
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.718124 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42" event={"ID":"bfbaeef9-cd3d-403c-ba71-5516712fea78","Type":"ContainerStarted","Data":"0adb1c214e0d14f26bb9b17fd721c5c8bd05e52f03ecd91b4f93e111649d3f2c"}
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.724032 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7" event={"ID":"4c23ed9c-a51f-4643-96f4-72e6739729c5","Type":"ContainerStarted","Data":"be464bbde9590d4d22785dab0cc000c8de2aa595b028da09e54d5998827a10e8"}
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.724415 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7"
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.725635 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-4mcgt" podStartSLOduration=3.162834573 podStartE2EDuration="12.72561943s" podCreationTimestamp="2025-11-25 10:36:15 +0000 UTC" firstStartedPulling="2025-11-25 10:36:17.06041572 +0000 UTC m=+6437.186445283" lastFinishedPulling="2025-11-25 10:36:26.623200577 +0000 UTC m=+6446.749230140" observedRunningTime="2025-11-25 10:36:27.722791059 +0000 UTC m=+6447.848820632" watchObservedRunningTime="2025-11-25 10:36:27.72561943 +0000 UTC m=+6447.851648993"
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.745351 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-6tknw" podStartSLOduration=2.99362475 podStartE2EDuration="12.745321673s" podCreationTimestamp="2025-11-25 10:36:15 +0000 UTC" firstStartedPulling="2025-11-25 10:36:16.758257044 +0000 UTC m=+6436.884286607" lastFinishedPulling="2025-11-25 10:36:26.509953967 +0000 UTC m=+6446.635983530" observedRunningTime="2025-11-25 10:36:27.736966624 +0000 UTC m=+6447.862996197" watchObservedRunningTime="2025-11-25 10:36:27.745321673 +0000 UTC m=+6447.871351246"
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.762884 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-5w6sw" podStartSLOduration=2.823142653 podStartE2EDuration="12.762850265s" podCreationTimestamp="2025-11-25 10:36:15 +0000 UTC" firstStartedPulling="2025-11-25 10:36:16.570244885 +0000 UTC m=+6436.696274448" lastFinishedPulling="2025-11-25 10:36:26.509952487 +0000 UTC m=+6446.635982060" observedRunningTime="2025-11-25 10:36:27.759582941 +0000 UTC m=+6447.885612514" watchObservedRunningTime="2025-11-25 10:36:27.762850265 +0000 UTC m=+6447.888879828"
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.763514 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7"
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.796202 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-55cff87c94-dtl42" podStartSLOduration=2.781094249 podStartE2EDuration="12.796167128s" podCreationTimestamp="2025-11-25 10:36:15 +0000 UTC" firstStartedPulling="2025-11-25 10:36:16.595808836 +0000 UTC m=+6436.721838399" lastFinishedPulling="2025-11-25 10:36:26.610881715 +0000 UTC m=+6446.736911278" observedRunningTime="2025-11-25 10:36:27.792279107 +0000 UTC m=+6447.918308670" watchObservedRunningTime="2025-11-25 10:36:27.796167128 +0000 UTC m=+6447.922196691"
Nov 25 10:36:27 crc kubenswrapper[4932]: I1125 10:36:27.832503 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-ng7j7" podStartSLOduration=3.012233982 podStartE2EDuration="12.832477986s" podCreationTimestamp="2025-11-25 10:36:15 +0000 UTC" firstStartedPulling="2025-11-25 10:36:16.87449711 +0000 UTC m=+6437.000526673" lastFinishedPulling="2025-11-25 10:36:26.694741114 +0000 UTC m=+6446.820770677" observedRunningTime="2025-11-25 10:36:27.82629323 +0000 UTC m=+6447.952322813" watchObservedRunningTime="2025-11-25 10:36:27.832477986 +0000 UTC m=+6447.958507559"
Nov 25 10:36:36 crc kubenswrapper[4932]: I1125 10:36:36.055767 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-njdms"]
Nov 25 10:36:36 crc kubenswrapper[4932]: I1125 10:36:36.071808 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-f085-account-create-x4kwg"]
Nov 25 10:36:36 crc kubenswrapper[4932]: I1125 10:36:36.086545 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-f085-account-create-x4kwg"]
Nov 25 10:36:36 crc kubenswrapper[4932]: I1125 10:36:36.097891 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-njdms"]
Nov 25 10:36:36 crc kubenswrapper[4932]: I1125 10:36:36.373640 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-4mcgt"
Nov 25 10:36:36 crc kubenswrapper[4932]: I1125 10:36:36.613808 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"
Nov 25 10:36:36 crc kubenswrapper[4932]: E1125 10:36:36.614067 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:36:36 crc kubenswrapper[4932]: I1125 10:36:36.624438 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18658aa6-a9b1-4277-80ba-304ac1a91ab0" path="/var/lib/kubelet/pods/18658aa6-a9b1-4277-80ba-304ac1a91ab0/volumes"
Nov 25 10:36:36 crc kubenswrapper[4932]: I1125 10:36:36.625635 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83dd0ea2-f060-47a7-822e-5f6a8f605df5" path="/var/lib/kubelet/pods/83dd0ea2-f060-47a7-822e-5f6a8f605df5/volumes"
Nov 25 10:36:38 crc kubenswrapper[4932]: I1125 10:36:38.841661 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"]
Nov 25 10:36:38 crc kubenswrapper[4932]: I1125 10:36:38.842165 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="0dde41a7-5aa6-486f-bedf-414e833c60bf" containerName="openstackclient" containerID="cri-o://428ffc82cb613e03d43cebe96eaa869b4cf7009e8b445f2824e1c832c47aae7a" gracePeriod=2
Nov 25 10:36:38 crc kubenswrapper[4932]: I1125 10:36:38.861086 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"]
Nov 25 10:36:38 crc kubenswrapper[4932]: I1125 10:36:38.896153 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Nov 25 10:36:38 crc kubenswrapper[4932]: E1125 10:36:38.896933 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0dde41a7-5aa6-486f-bedf-414e833c60bf" containerName="openstackclient"
Nov 25 10:36:38 crc kubenswrapper[4932]: I1125 10:36:38.896949 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="0dde41a7-5aa6-486f-bedf-414e833c60bf" containerName="openstackclient"
Nov 25 10:36:38 crc kubenswrapper[4932]: I1125 10:36:38.897210 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="0dde41a7-5aa6-486f-bedf-414e833c60bf" containerName="openstackclient"
Nov 25 10:36:38 crc kubenswrapper[4932]: I1125 10:36:38.897976 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 25 10:36:38 crc kubenswrapper[4932]: I1125 10:36:38.963540 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 25 10:36:38 crc kubenswrapper[4932]: I1125 10:36:38.977932 4932 status_manager.go:875] "Failed to update status for pod" pod="openstack/openstackclient" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19bc3659-7540-43d5-b382-f12953ee9100\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T10:36:38Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T10:36:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T10:36:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T10:36:38Z\\\",\\\"message\\\":\\\"containers with unready status: [openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/podified-antelope-centos9/openstack-openstackclient@sha256:776211111e2e6493706dbc49a3ba44f31d1b947919313ed3a0f35810e304ec52\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"openstackclient\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/clouds.yaml\\\",\\\"name\\\":\\\"openstack-config\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/secure.yaml\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/cloudrc\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem\\\",\\\"name\\\":\\\"combined-ca-bundle\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s7qnc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T10:36:38Z\\\"}}\" for pod \"openstack\"/\"openstackclient\": pods \"openstackclient\" not found"
Nov 25 10:36:38 crc kubenswrapper[4932]: I1125 10:36:38.981892 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="0dde41a7-5aa6-486f-bedf-414e833c60bf" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f"
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.025860 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19bc3659-7540-43d5-b382-f12953ee9100-combined-ca-bundle\") pod \"openstackclient\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " pod="openstack/openstackclient"
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.025959 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/19bc3659-7540-43d5-b382-f12953ee9100-openstack-config-secret\") pod \"openstackclient\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " pod="openstack/openstackclient"
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.026024 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/19bc3659-7540-43d5-b382-f12953ee9100-openstack-config\") pod \"openstackclient\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " pod="openstack/openstackclient"
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.026046 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s7qnc\" (UniqueName: \"kubernetes.io/projected/19bc3659-7540-43d5-b382-f12953ee9100-kube-api-access-s7qnc\") pod \"openstackclient\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " pod="openstack/openstackclient"
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.040252 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"]
Nov 25 10:36:39 crc kubenswrapper[4932]: E1125 10:36:39.041275 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle kube-api-access-s7qnc openstack-config openstack-config-secret], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/openstackclient" podUID="19bc3659-7540-43d5-b382-f12953ee9100"
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.074450 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"]
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.117249 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.118673 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.129762 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7qnc\" (UniqueName: \"kubernetes.io/projected/19bc3659-7540-43d5-b382-f12953ee9100-kube-api-access-s7qnc\") pod \"openstackclient\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " pod="openstack/openstackclient"
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.129957 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19bc3659-7540-43d5-b382-f12953ee9100-combined-ca-bundle\") pod \"openstackclient\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " pod="openstack/openstackclient"
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.130030 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/19bc3659-7540-43d5-b382-f12953ee9100-openstack-config-secret\") pod \"openstackclient\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " pod="openstack/openstackclient"
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.130080 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/19bc3659-7540-43d5-b382-f12953ee9100-openstack-config\") pod \"openstackclient\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " pod="openstack/openstackclient"
Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.131231 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Nov 25 10:36:39 crc kubenswrapper[4932]: E1125 10:36:39.136681 4932 projected.go:194] Error preparing data for projected volume kube-api-access-s7qnc for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (19bc3659-7540-43d5-b382-f12953ee9100) does not match the UID in record. The object might have been deleted and then recreated
Nov 25 10:36:39 crc kubenswrapper[4932]: E1125 10:36:39.136750 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/19bc3659-7540-43d5-b382-f12953ee9100-kube-api-access-s7qnc podName:19bc3659-7540-43d5-b382-f12953ee9100 nodeName:}" failed. No retries permitted until 2025-11-25 10:36:39.636731239 +0000 UTC m=+6459.762760802 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s7qnc" (UniqueName: "kubernetes.io/projected/19bc3659-7540-43d5-b382-f12953ee9100-kube-api-access-s7qnc") pod "openstackclient" (UID: "19bc3659-7540-43d5-b382-f12953ee9100") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (19bc3659-7540-43d5-b382-f12953ee9100) does not match the UID in record.
The object might have been deleted and then recreated Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.136971 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/19bc3659-7540-43d5-b382-f12953ee9100-openstack-config\") pod \"openstackclient\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.146408 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19bc3659-7540-43d5-b382-f12953ee9100-combined-ca-bundle\") pod \"openstackclient\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.171432 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/19bc3659-7540-43d5-b382-f12953ee9100-openstack-config-secret\") pod \"openstackclient\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.171517 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.181399 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.186639 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="19bc3659-7540-43d5-b382-f12953ee9100" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.187012 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-bm44s" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.197921 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.231619 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hf9p\" (UniqueName: \"kubernetes.io/projected/c4644bf8-6142-462d-8c94-c07283f431a9-kube-api-access-7hf9p\") pod \"kube-state-metrics-0\" (UID: \"c4644bf8-6142-462d-8c94-c07283f431a9\") " pod="openstack/kube-state-metrics-0" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.231722 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b90df741-01e1-480d-89bf-040a0f6bef5f-openstack-config-secret\") pod \"openstackclient\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.231748 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b90df741-01e1-480d-89bf-040a0f6bef5f-openstack-config\") pod \"openstackclient\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.231822 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f58j\" (UniqueName: 
\"kubernetes.io/projected/b90df741-01e1-480d-89bf-040a0f6bef5f-kube-api-access-4f58j\") pod \"openstackclient\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.231954 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b90df741-01e1-480d-89bf-040a0f6bef5f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.335433 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b90df741-01e1-480d-89bf-040a0f6bef5f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.335810 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hf9p\" (UniqueName: \"kubernetes.io/projected/c4644bf8-6142-462d-8c94-c07283f431a9-kube-api-access-7hf9p\") pod \"kube-state-metrics-0\" (UID: \"c4644bf8-6142-462d-8c94-c07283f431a9\") " pod="openstack/kube-state-metrics-0" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.335869 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b90df741-01e1-480d-89bf-040a0f6bef5f-openstack-config-secret\") pod \"openstackclient\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.335891 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b90df741-01e1-480d-89bf-040a0f6bef5f-openstack-config\") pod \"openstackclient\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.335950 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f58j\" (UniqueName: \"kubernetes.io/projected/b90df741-01e1-480d-89bf-040a0f6bef5f-kube-api-access-4f58j\") pod \"openstackclient\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.340629 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b90df741-01e1-480d-89bf-040a0f6bef5f-openstack-config\") pod \"openstackclient\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.352683 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b90df741-01e1-480d-89bf-040a0f6bef5f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.359494 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b90df741-01e1-480d-89bf-040a0f6bef5f-openstack-config-secret\") pod \"openstackclient\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " pod="openstack/openstackclient" Nov 25 
10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.363632 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f58j\" (UniqueName: \"kubernetes.io/projected/b90df741-01e1-480d-89bf-040a0f6bef5f-kube-api-access-4f58j\") pod \"openstackclient\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.399001 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hf9p\" (UniqueName: \"kubernetes.io/projected/c4644bf8-6142-462d-8c94-c07283f431a9-kube-api-access-7hf9p\") pod \"kube-state-metrics-0\" (UID: \"c4644bf8-6142-462d-8c94-c07283f431a9\") " pod="openstack/kube-state-metrics-0" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.552200 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.578752 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.647442 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s7qnc\" (UniqueName: \"kubernetes.io/projected/19bc3659-7540-43d5-b382-f12953ee9100-kube-api-access-s7qnc\") pod \"openstackclient\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: E1125 10:36:39.652226 4932 projected.go:194] Error preparing data for projected volume kube-api-access-s7qnc for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (19bc3659-7540-43d5-b382-f12953ee9100) does not match the UID in record. The object might have been deleted and then recreated Nov 25 10:36:39 crc kubenswrapper[4932]: E1125 10:36:39.652349 4932 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/19bc3659-7540-43d5-b382-f12953ee9100-kube-api-access-s7qnc podName:19bc3659-7540-43d5-b382-f12953ee9100 nodeName:}" failed. No retries permitted until 2025-11-25 10:36:40.652311731 +0000 UTC m=+6460.778341294 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s7qnc" (UniqueName: "kubernetes.io/projected/19bc3659-7540-43d5-b382-f12953ee9100-kube-api-access-s7qnc") pod "openstackclient" (UID: "19bc3659-7540-43d5-b382-f12953ee9100") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (19bc3659-7540-43d5-b382-f12953ee9100) does not match the UID in record. The object might have been deleted and then recreated Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.894745 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.899602 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="19bc3659-7540-43d5-b382-f12953ee9100" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.954981 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.957942 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="19bc3659-7540-43d5-b382-f12953ee9100" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.957988 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.960318 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.966912 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.967388 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.967518 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-nx627" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.967515 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.968643 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Nov 25 10:36:39 crc kubenswrapper[4932]: I1125 10:36:39.969147 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.056086 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/19bc3659-7540-43d5-b382-f12953ee9100-openstack-config\") pod \"19bc3659-7540-43d5-b382-f12953ee9100\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.056250 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/19bc3659-7540-43d5-b382-f12953ee9100-openstack-config-secret\") pod \"19bc3659-7540-43d5-b382-f12953ee9100\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.056485 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19bc3659-7540-43d5-b382-f12953ee9100-combined-ca-bundle\") pod \"19bc3659-7540-43d5-b382-f12953ee9100\" (UID: \"19bc3659-7540-43d5-b382-f12953ee9100\") " Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.057037 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/75c2e997-95e0-408c-b813-ba928f299bc2-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.057142 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: 
\"kubernetes.io/empty-dir/75c2e997-95e0-408c-b813-ba928f299bc2-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.057168 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/75c2e997-95e0-408c-b813-ba928f299bc2-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.057242 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/75c2e997-95e0-408c-b813-ba928f299bc2-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.057305 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/75c2e997-95e0-408c-b813-ba928f299bc2-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.057362 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxdw9\" (UniqueName: \"kubernetes.io/projected/75c2e997-95e0-408c-b813-ba928f299bc2-kube-api-access-xxdw9\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.057388 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/75c2e997-95e0-408c-b813-ba928f299bc2-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.057499 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s7qnc\" (UniqueName: \"kubernetes.io/projected/19bc3659-7540-43d5-b382-f12953ee9100-kube-api-access-s7qnc\") on node \"crc\" DevicePath \"\"" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.057861 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19bc3659-7540-43d5-b382-f12953ee9100-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "19bc3659-7540-43d5-b382-f12953ee9100" (UID: "19bc3659-7540-43d5-b382-f12953ee9100"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.072426 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19bc3659-7540-43d5-b382-f12953ee9100-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19bc3659-7540-43d5-b382-f12953ee9100" (UID: "19bc3659-7540-43d5-b382-f12953ee9100"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.072463 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19bc3659-7540-43d5-b382-f12953ee9100-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "19bc3659-7540-43d5-b382-f12953ee9100" (UID: "19bc3659-7540-43d5-b382-f12953ee9100"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.163296 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/75c2e997-95e0-408c-b813-ba928f299bc2-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.163372 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/75c2e997-95e0-408c-b813-ba928f299bc2-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.163392 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/75c2e997-95e0-408c-b813-ba928f299bc2-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.163432 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/75c2e997-95e0-408c-b813-ba928f299bc2-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.163477 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/75c2e997-95e0-408c-b813-ba928f299bc2-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.163525 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/75c2e997-95e0-408c-b813-ba928f299bc2-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.163541 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxdw9\" (UniqueName: \"kubernetes.io/projected/75c2e997-95e0-408c-b813-ba928f299bc2-kube-api-access-xxdw9\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.163612 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/19bc3659-7540-43d5-b382-f12953ee9100-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 
10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.163624 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/19bc3659-7540-43d5-b382-f12953ee9100-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.163634 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19bc3659-7540-43d5-b382-f12953ee9100-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.172122 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/75c2e997-95e0-408c-b813-ba928f299bc2-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.172827 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/75c2e997-95e0-408c-b813-ba928f299bc2-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.173094 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/75c2e997-95e0-408c-b813-ba928f299bc2-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.173252 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/75c2e997-95e0-408c-b813-ba928f299bc2-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.180167 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/75c2e997-95e0-408c-b813-ba928f299bc2-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.186421 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxdw9\" (UniqueName: \"kubernetes.io/projected/75c2e997-95e0-408c-b813-ba928f299bc2-kube-api-access-xxdw9\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.196888 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/75c2e997-95e0-408c-b813-ba928f299bc2-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"75c2e997-95e0-408c-b813-ba928f299bc2\") " pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.318732 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.517843 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.522424 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.531968 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.532319 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.532559 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.532664 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.532808 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.532941 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-2zs26" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.596818 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/39fa75e8-70c4-410e-b828-37eee9a1d63f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.597032 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.597130 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.597349 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8llt6\" (UniqueName: \"kubernetes.io/projected/39fa75e8-70c4-410e-b828-37eee9a1d63f-kube-api-access-8llt6\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.597522 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/39fa75e8-70c4-410e-b828-37eee9a1d63f-config-out\") pod \"prometheus-metric-storage-0\" (UID: 
\"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.597622 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.597722 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/39fa75e8-70c4-410e-b828-37eee9a1d63f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.597816 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-config\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.598422 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.679212 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19bc3659-7540-43d5-b382-f12953ee9100" path="/var/lib/kubelet/pods/19bc3659-7540-43d5-b382-f12953ee9100/volumes" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.680105 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.698944 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.710371 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.710549 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8llt6\" (UniqueName: \"kubernetes.io/projected/39fa75e8-70c4-410e-b828-37eee9a1d63f-kube-api-access-8llt6\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.710709 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/39fa75e8-70c4-410e-b828-37eee9a1d63f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.710739 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.710775 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/39fa75e8-70c4-410e-b828-37eee9a1d63f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.710811 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-config\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.711170 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/39fa75e8-70c4-410e-b828-37eee9a1d63f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.712029 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/39fa75e8-70c4-410e-b828-37eee9a1d63f-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.716130 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.724332 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.725379 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/39fa75e8-70c4-410e-b828-37eee9a1d63f-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.729311 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/39fa75e8-70c4-410e-b828-37eee9a1d63f-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.729901 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-config\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " 
pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.736592 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.763849 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.763911 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9ea084770c02ed5a7ec854914453e15e6a92564817ab4fa949bbc517dd437f23/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.763974 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8llt6\" (UniqueName: \"kubernetes.io/projected/39fa75e8-70c4-410e-b828-37eee9a1d63f-kube-api-access-8llt6\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.945005 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\") pod \"prometheus-metric-storage-0\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.946178 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"b90df741-01e1-480d-89bf-040a0f6bef5f","Type":"ContainerStarted","Data":"d99bce64914349e4d2e19a5193abacc6dfe32f0795457a4639759b9b2e324890"} Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.985437 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 10:36:40 crc kubenswrapper[4932]: I1125 10:36:40.986301 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c4644bf8-6142-462d-8c94-c07283f431a9","Type":"ContainerStarted","Data":"16f2d20086f5431a9901746b2bb19d6d0a165dddca7ec8d54e4a515855917e50"} Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.002792 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="19bc3659-7540-43d5-b382-f12953ee9100" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f" Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.241843 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.383058 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.818963 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.822618 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="0dde41a7-5aa6-486f-bedf-414e833c60bf" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f" Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.915432 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.966802 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0dde41a7-5aa6-486f-bedf-414e833c60bf-openstack-config-secret\") pod \"0dde41a7-5aa6-486f-bedf-414e833c60bf\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.966878 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0dde41a7-5aa6-486f-bedf-414e833c60bf-openstack-config\") pod \"0dde41a7-5aa6-486f-bedf-414e833c60bf\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.966921 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h6bpf\" (UniqueName: \"kubernetes.io/projected/0dde41a7-5aa6-486f-bedf-414e833c60bf-kube-api-access-h6bpf\") pod \"0dde41a7-5aa6-486f-bedf-414e833c60bf\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.966982 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dde41a7-5aa6-486f-bedf-414e833c60bf-combined-ca-bundle\") pod \"0dde41a7-5aa6-486f-bedf-414e833c60bf\" (UID: \"0dde41a7-5aa6-486f-bedf-414e833c60bf\") " Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.972019 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dde41a7-5aa6-486f-bedf-414e833c60bf-kube-api-access-h6bpf" (OuterVolumeSpecName: "kube-api-access-h6bpf") pod "0dde41a7-5aa6-486f-bedf-414e833c60bf" (UID: "0dde41a7-5aa6-486f-bedf-414e833c60bf"). InnerVolumeSpecName "kube-api-access-h6bpf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.996763 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0dde41a7-5aa6-486f-bedf-414e833c60bf-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "0dde41a7-5aa6-486f-bedf-414e833c60bf" (UID: "0dde41a7-5aa6-486f-bedf-414e833c60bf"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.997176 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c4644bf8-6142-462d-8c94-c07283f431a9","Type":"ContainerStarted","Data":"4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab"} Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.997261 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 10:36:41 crc kubenswrapper[4932]: I1125 10:36:41.998744 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"39fa75e8-70c4-410e-b828-37eee9a1d63f","Type":"ContainerStarted","Data":"dda8f7ab07b98042c2ccf86b9a73fc615c47002503e6d98ee4854a988de0dd99"} Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.001709 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dde41a7-5aa6-486f-bedf-414e833c60bf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0dde41a7-5aa6-486f-bedf-414e833c60bf" (UID: "0dde41a7-5aa6-486f-bedf-414e833c60bf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.001796 4932 generic.go:334] "Generic (PLEG): container finished" podID="0dde41a7-5aa6-486f-bedf-414e833c60bf" containerID="428ffc82cb613e03d43cebe96eaa869b4cf7009e8b445f2824e1c832c47aae7a" exitCode=137 Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.001851 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.001876 4932 scope.go:117] "RemoveContainer" containerID="428ffc82cb613e03d43cebe96eaa869b4cf7009e8b445f2824e1c832c47aae7a" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.004152 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"75c2e997-95e0-408c-b813-ba928f299bc2","Type":"ContainerStarted","Data":"c809cc13ba8175b6e1318a9de9faa804ee727b26bb332bfaec459db34c32b39e"} Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.009880 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"b90df741-01e1-480d-89bf-040a0f6bef5f","Type":"ContainerStarted","Data":"0950276bddd6e5238bfa12a2f369eca1e7ee6977d66ae613a290e9b70fc6d6cc"} Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.017827 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.169564903 podStartE2EDuration="3.017781842s" podCreationTimestamp="2025-11-25 10:36:39 +0000 UTC" firstStartedPulling="2025-11-25 10:36:40.585708538 +0000 UTC m=+6460.711738101" lastFinishedPulling="2025-11-25 10:36:41.433925477 +0000 UTC m=+6461.559955040" observedRunningTime="2025-11-25 10:36:42.010133403 +0000 UTC m=+6462.136162966" watchObservedRunningTime="2025-11-25 10:36:42.017781842 +0000 UTC m=+6462.143811405" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.033580 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="0dde41a7-5aa6-486f-bedf-414e833c60bf" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.041443 4932 scope.go:117] "RemoveContainer" 
containerID="428ffc82cb613e03d43cebe96eaa869b4cf7009e8b445f2824e1c832c47aae7a" Nov 25 10:36:42 crc kubenswrapper[4932]: E1125 10:36:42.041863 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"428ffc82cb613e03d43cebe96eaa869b4cf7009e8b445f2824e1c832c47aae7a\": container with ID starting with 428ffc82cb613e03d43cebe96eaa869b4cf7009e8b445f2824e1c832c47aae7a not found: ID does not exist" containerID="428ffc82cb613e03d43cebe96eaa869b4cf7009e8b445f2824e1c832c47aae7a" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.041904 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"428ffc82cb613e03d43cebe96eaa869b4cf7009e8b445f2824e1c832c47aae7a"} err="failed to get container status \"428ffc82cb613e03d43cebe96eaa869b4cf7009e8b445f2824e1c832c47aae7a\": rpc error: code = NotFound desc = could not find container \"428ffc82cb613e03d43cebe96eaa869b4cf7009e8b445f2824e1c832c47aae7a\": container with ID starting with 428ffc82cb613e03d43cebe96eaa869b4cf7009e8b445f2824e1c832c47aae7a not found: ID does not exist" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.042639 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=4.042628713 podStartE2EDuration="4.042628713s" podCreationTimestamp="2025-11-25 10:36:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:36:42.025583795 +0000 UTC m=+6462.151613358" watchObservedRunningTime="2025-11-25 10:36:42.042628713 +0000 UTC m=+6462.168658276" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.058455 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dde41a7-5aa6-486f-bedf-414e833c60bf-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "0dde41a7-5aa6-486f-bedf-414e833c60bf" (UID: "0dde41a7-5aa6-486f-bedf-414e833c60bf"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.069627 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0dde41a7-5aa6-486f-bedf-414e833c60bf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.069658 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0dde41a7-5aa6-486f-bedf-414e833c60bf-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.069669 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0dde41a7-5aa6-486f-bedf-414e833c60bf-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.069678 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h6bpf\" (UniqueName: \"kubernetes.io/projected/0dde41a7-5aa6-486f-bedf-414e833c60bf-kube-api-access-h6bpf\") on node \"crc\" DevicePath \"\"" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.321169 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="0dde41a7-5aa6-486f-bedf-414e833c60bf" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f" Nov 25 10:36:42 crc kubenswrapper[4932]: I1125 10:36:42.617931 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dde41a7-5aa6-486f-bedf-414e833c60bf" path="/var/lib/kubelet/pods/0dde41a7-5aa6-486f-bedf-414e833c60bf/volumes" Nov 25 10:36:44 crc kubenswrapper[4932]: I1125 10:36:44.028328 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-q2944"] Nov 25 10:36:44 crc kubenswrapper[4932]: I1125 10:36:44.037959 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-q2944"] Nov 25 10:36:44 crc kubenswrapper[4932]: I1125 10:36:44.620892 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="905207e2-07d5-4d64-bd4e-e6459f5e9827" path="/var/lib/kubelet/pods/905207e2-07d5-4d64-bd4e-e6459f5e9827/volumes" Nov 25 10:36:47 crc kubenswrapper[4932]: I1125 10:36:47.060477 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"39fa75e8-70c4-410e-b828-37eee9a1d63f","Type":"ContainerStarted","Data":"ad69d8c7f6ece79945df981c66a5e2051396e1fba799bac7dc9b442b80096f40"} Nov 25 10:36:47 crc kubenswrapper[4932]: I1125 10:36:47.062551 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"75c2e997-95e0-408c-b813-ba928f299bc2","Type":"ContainerStarted","Data":"c9295f1074193f23144044c04edb79ba2cc9b159a5770bb6b12132f9381680b5"} Nov 25 10:36:47 crc kubenswrapper[4932]: I1125 10:36:47.607792 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:36:47 crc kubenswrapper[4932]: E1125 10:36:47.608580 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" 
podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:36:49 crc kubenswrapper[4932]: I1125 10:36:49.584401 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 10:36:54 crc kubenswrapper[4932]: I1125 10:36:54.153015 4932 generic.go:334] "Generic (PLEG): container finished" podID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerID="ad69d8c7f6ece79945df981c66a5e2051396e1fba799bac7dc9b442b80096f40" exitCode=0 Nov 25 10:36:54 crc kubenswrapper[4932]: I1125 10:36:54.154300 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"39fa75e8-70c4-410e-b828-37eee9a1d63f","Type":"ContainerDied","Data":"ad69d8c7f6ece79945df981c66a5e2051396e1fba799bac7dc9b442b80096f40"} Nov 25 10:36:54 crc kubenswrapper[4932]: I1125 10:36:54.157135 4932 generic.go:334] "Generic (PLEG): container finished" podID="75c2e997-95e0-408c-b813-ba928f299bc2" containerID="c9295f1074193f23144044c04edb79ba2cc9b159a5770bb6b12132f9381680b5" exitCode=0 Nov 25 10:36:54 crc kubenswrapper[4932]: I1125 10:36:54.157182 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"75c2e997-95e0-408c-b813-ba928f299bc2","Type":"ContainerDied","Data":"c9295f1074193f23144044c04edb79ba2cc9b159a5770bb6b12132f9381680b5"} Nov 25 10:36:58 crc kubenswrapper[4932]: I1125 10:36:58.606727 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:36:58 crc kubenswrapper[4932]: E1125 10:36:58.607497 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:37:01 crc kubenswrapper[4932]: I1125 10:37:01.221506 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"75c2e997-95e0-408c-b813-ba928f299bc2","Type":"ContainerStarted","Data":"05ba19a42c757f913608a14e1cdea16e78533e93635ff8e342faee94672c25d6"} Nov 25 10:37:01 crc kubenswrapper[4932]: I1125 10:37:01.223724 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"39fa75e8-70c4-410e-b828-37eee9a1d63f","Type":"ContainerStarted","Data":"53aaa87d4cdf8040bf21065da8a0923b75e365b809321c0d42057556ce95aace"} Nov 25 10:37:04 crc kubenswrapper[4932]: I1125 10:37:04.252985 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"75c2e997-95e0-408c-b813-ba928f299bc2","Type":"ContainerStarted","Data":"7d0900dbbc31829405d7a5960f7e9ffaa6f37012bef5d027b8abed04fda47888"} Nov 25 10:37:04 crc kubenswrapper[4932]: I1125 10:37:04.253958 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0" Nov 25 10:37:04 crc kubenswrapper[4932]: I1125 10:37:04.257863 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0" Nov 25 10:37:04 crc kubenswrapper[4932]: I1125 10:37:04.280736 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=6.585423369 
podStartE2EDuration="25.280718951s" podCreationTimestamp="2025-11-25 10:36:39 +0000 UTC" firstStartedPulling="2025-11-25 10:36:41.41305511 +0000 UTC m=+6461.539084673" lastFinishedPulling="2025-11-25 10:37:00.108350702 +0000 UTC m=+6480.234380255" observedRunningTime="2025-11-25 10:37:04.273033661 +0000 UTC m=+6484.399063244" watchObservedRunningTime="2025-11-25 10:37:04.280718951 +0000 UTC m=+6484.406748514" Nov 25 10:37:05 crc kubenswrapper[4932]: I1125 10:37:05.269036 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"39fa75e8-70c4-410e-b828-37eee9a1d63f","Type":"ContainerStarted","Data":"97550ed88fc51beee5f152521dfb6767671e19c9c1308ab47349030fdbc31f4f"} Nov 25 10:37:07 crc kubenswrapper[4932]: I1125 10:37:07.289639 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"39fa75e8-70c4-410e-b828-37eee9a1d63f","Type":"ContainerStarted","Data":"0d4a60740bfae56789929dac56781b26df30ba4ec23c9e5f6b983cac3f931e7d"} Nov 25 10:37:07 crc kubenswrapper[4932]: I1125 10:37:07.327561 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=3.420777731 podStartE2EDuration="28.327535785s" podCreationTimestamp="2025-11-25 10:36:39 +0000 UTC" firstStartedPulling="2025-11-25 10:36:41.926097799 +0000 UTC m=+6462.052127362" lastFinishedPulling="2025-11-25 10:37:06.832855853 +0000 UTC m=+6486.958885416" observedRunningTime="2025-11-25 10:37:07.318395674 +0000 UTC m=+6487.444425267" watchObservedRunningTime="2025-11-25 10:37:07.327535785 +0000 UTC m=+6487.453565358" Nov 25 10:37:11 crc kubenswrapper[4932]: I1125 10:37:11.243211 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:11 crc kubenswrapper[4932]: I1125 10:37:11.244140 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:11 crc kubenswrapper[4932]: I1125 10:37:11.248505 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:11 crc kubenswrapper[4932]: I1125 10:37:11.328996 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.335823 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.336058 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f" containerName="openstackclient" containerID="cri-o://0950276bddd6e5238bfa12a2f369eca1e7ee6977d66ae613a290e9b70fc6d6cc" gracePeriod=2 Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.350446 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.379554 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 10:37:12 crc kubenswrapper[4932]: E1125 10:37:12.380058 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f" containerName="openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.380078 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f" 
containerName="openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.380389 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f" containerName="openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.381412 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.384800 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="b90df741-01e1-480d-89bf-040a0f6bef5f" podUID="f9e1331e-adee-4b7b-a604-6fac318106dc" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.391088 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.486461 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54s5w\" (UniqueName: \"kubernetes.io/projected/f9e1331e-adee-4b7b-a604-6fac318106dc-kube-api-access-54s5w\") pod \"openstackclient\" (UID: \"f9e1331e-adee-4b7b-a604-6fac318106dc\") " pod="openstack/openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.486617 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f9e1331e-adee-4b7b-a604-6fac318106dc-openstack-config\") pod \"openstackclient\" (UID: \"f9e1331e-adee-4b7b-a604-6fac318106dc\") " pod="openstack/openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.486925 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f9e1331e-adee-4b7b-a604-6fac318106dc-openstack-config-secret\") pod \"openstackclient\" (UID: \"f9e1331e-adee-4b7b-a604-6fac318106dc\") " pod="openstack/openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.486970 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9e1331e-adee-4b7b-a604-6fac318106dc-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f9e1331e-adee-4b7b-a604-6fac318106dc\") " pod="openstack/openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.588625 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f9e1331e-adee-4b7b-a604-6fac318106dc-openstack-config\") pod \"openstackclient\" (UID: \"f9e1331e-adee-4b7b-a604-6fac318106dc\") " pod="openstack/openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.588855 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f9e1331e-adee-4b7b-a604-6fac318106dc-openstack-config-secret\") pod \"openstackclient\" (UID: \"f9e1331e-adee-4b7b-a604-6fac318106dc\") " pod="openstack/openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.588882 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9e1331e-adee-4b7b-a604-6fac318106dc-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f9e1331e-adee-4b7b-a604-6fac318106dc\") " pod="openstack/openstackclient" Nov 25 10:37:12 crc 
kubenswrapper[4932]: I1125 10:37:12.588930 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54s5w\" (UniqueName: \"kubernetes.io/projected/f9e1331e-adee-4b7b-a604-6fac318106dc-kube-api-access-54s5w\") pod \"openstackclient\" (UID: \"f9e1331e-adee-4b7b-a604-6fac318106dc\") " pod="openstack/openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.590110 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/f9e1331e-adee-4b7b-a604-6fac318106dc-openstack-config\") pod \"openstackclient\" (UID: \"f9e1331e-adee-4b7b-a604-6fac318106dc\") " pod="openstack/openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.595008 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9e1331e-adee-4b7b-a604-6fac318106dc-combined-ca-bundle\") pod \"openstackclient\" (UID: \"f9e1331e-adee-4b7b-a604-6fac318106dc\") " pod="openstack/openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.595448 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/f9e1331e-adee-4b7b-a604-6fac318106dc-openstack-config-secret\") pod \"openstackclient\" (UID: \"f9e1331e-adee-4b7b-a604-6fac318106dc\") " pod="openstack/openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.607059 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:37:12 crc kubenswrapper[4932]: E1125 10:37:12.607340 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.618900 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54s5w\" (UniqueName: \"kubernetes.io/projected/f9e1331e-adee-4b7b-a604-6fac318106dc-kube-api-access-54s5w\") pod \"openstackclient\" (UID: \"f9e1331e-adee-4b7b-a604-6fac318106dc\") " pod="openstack/openstackclient" Nov 25 10:37:12 crc kubenswrapper[4932]: I1125 10:37:12.710568 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 10:37:13 crc kubenswrapper[4932]: I1125 10:37:13.302090 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 10:37:13 crc kubenswrapper[4932]: I1125 10:37:13.347484 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"f9e1331e-adee-4b7b-a604-6fac318106dc","Type":"ContainerStarted","Data":"ce04f2232c881aa94b908945f330dbed97ddef74a5f85cd2549fd3e5d031f03a"} Nov 25 10:37:13 crc kubenswrapper[4932]: I1125 10:37:13.637687 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:37:13 crc kubenswrapper[4932]: I1125 10:37:13.638053 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="prometheus" containerID="cri-o://53aaa87d4cdf8040bf21065da8a0923b75e365b809321c0d42057556ce95aace" gracePeriod=600 Nov 25 10:37:13 crc kubenswrapper[4932]: I1125 10:37:13.638462 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="thanos-sidecar" containerID="cri-o://0d4a60740bfae56789929dac56781b26df30ba4ec23c9e5f6b983cac3f931e7d" gracePeriod=600 Nov 25 10:37:13 crc kubenswrapper[4932]: I1125 10:37:13.638509 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="config-reloader" containerID="cri-o://97550ed88fc51beee5f152521dfb6767671e19c9c1308ab47349030fdbc31f4f" gracePeriod=600 Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.381371 4932 generic.go:334] "Generic (PLEG): container finished" podID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerID="0d4a60740bfae56789929dac56781b26df30ba4ec23c9e5f6b983cac3f931e7d" exitCode=0 Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.381700 4932 generic.go:334] "Generic (PLEG): container finished" podID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerID="97550ed88fc51beee5f152521dfb6767671e19c9c1308ab47349030fdbc31f4f" exitCode=0 Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.381712 4932 generic.go:334] "Generic (PLEG): container finished" podID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerID="53aaa87d4cdf8040bf21065da8a0923b75e365b809321c0d42057556ce95aace" exitCode=0 Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.381539 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"39fa75e8-70c4-410e-b828-37eee9a1d63f","Type":"ContainerDied","Data":"0d4a60740bfae56789929dac56781b26df30ba4ec23c9e5f6b983cac3f931e7d"} Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.381801 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"39fa75e8-70c4-410e-b828-37eee9a1d63f","Type":"ContainerDied","Data":"97550ed88fc51beee5f152521dfb6767671e19c9c1308ab47349030fdbc31f4f"} Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.381814 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"39fa75e8-70c4-410e-b828-37eee9a1d63f","Type":"ContainerDied","Data":"53aaa87d4cdf8040bf21065da8a0923b75e365b809321c0d42057556ce95aace"} Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.386048 4932 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"f9e1331e-adee-4b7b-a604-6fac318106dc","Type":"ContainerStarted","Data":"d10bf0b957b3300faa3ec2be8cf001c848deaae635a86bc4bcbfa3ee612efdeb"} Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.402814 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.40279448 podStartE2EDuration="2.40279448s" podCreationTimestamp="2025-11-25 10:37:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:37:14.401837813 +0000 UTC m=+6494.527867406" watchObservedRunningTime="2025-11-25 10:37:14.40279448 +0000 UTC m=+6494.528824043" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.722293 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.729302 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.857694 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8llt6\" (UniqueName: \"kubernetes.io/projected/39fa75e8-70c4-410e-b828-37eee9a1d63f-kube-api-access-8llt6\") pod \"39fa75e8-70c4-410e-b828-37eee9a1d63f\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.858481 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-web-config\") pod \"39fa75e8-70c4-410e-b828-37eee9a1d63f\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.858627 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/39fa75e8-70c4-410e-b828-37eee9a1d63f-tls-assets\") pod \"39fa75e8-70c4-410e-b828-37eee9a1d63f\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.858795 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b90df741-01e1-480d-89bf-040a0f6bef5f-openstack-config\") pod \"b90df741-01e1-480d-89bf-040a0f6bef5f\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.859327 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b90df741-01e1-480d-89bf-040a0f6bef5f-openstack-config-secret\") pod \"b90df741-01e1-480d-89bf-040a0f6bef5f\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.859473 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/39fa75e8-70c4-410e-b828-37eee9a1d63f-prometheus-metric-storage-rulefiles-0\") pod \"39fa75e8-70c4-410e-b828-37eee9a1d63f\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.859589 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b90df741-01e1-480d-89bf-040a0f6bef5f-combined-ca-bundle\") pod \"b90df741-01e1-480d-89bf-040a0f6bef5f\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.859770 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\") pod \"39fa75e8-70c4-410e-b828-37eee9a1d63f\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.859941 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/39fa75e8-70c4-410e-b828-37eee9a1d63f-config-out\") pod \"39fa75e8-70c4-410e-b828-37eee9a1d63f\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.860121 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-config\") pod \"39fa75e8-70c4-410e-b828-37eee9a1d63f\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.860300 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4f58j\" (UniqueName: \"kubernetes.io/projected/b90df741-01e1-480d-89bf-040a0f6bef5f-kube-api-access-4f58j\") pod \"b90df741-01e1-480d-89bf-040a0f6bef5f\" (UID: \"b90df741-01e1-480d-89bf-040a0f6bef5f\") " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.860417 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-thanos-prometheus-http-client-file\") pod \"39fa75e8-70c4-410e-b828-37eee9a1d63f\" (UID: \"39fa75e8-70c4-410e-b828-37eee9a1d63f\") " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.861632 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39fa75e8-70c4-410e-b828-37eee9a1d63f-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "39fa75e8-70c4-410e-b828-37eee9a1d63f" (UID: "39fa75e8-70c4-410e-b828-37eee9a1d63f"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.864271 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39fa75e8-70c4-410e-b828-37eee9a1d63f-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "39fa75e8-70c4-410e-b828-37eee9a1d63f" (UID: "39fa75e8-70c4-410e-b828-37eee9a1d63f"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.865232 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39fa75e8-70c4-410e-b828-37eee9a1d63f-kube-api-access-8llt6" (OuterVolumeSpecName: "kube-api-access-8llt6") pod "39fa75e8-70c4-410e-b828-37eee9a1d63f" (UID: "39fa75e8-70c4-410e-b828-37eee9a1d63f"). InnerVolumeSpecName "kube-api-access-8llt6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.866981 4932 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/39fa75e8-70c4-410e-b828-37eee9a1d63f-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.867633 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8llt6\" (UniqueName: \"kubernetes.io/projected/39fa75e8-70c4-410e-b828-37eee9a1d63f-kube-api-access-8llt6\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.867731 4932 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/39fa75e8-70c4-410e-b828-37eee9a1d63f-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.868665 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b90df741-01e1-480d-89bf-040a0f6bef5f-kube-api-access-4f58j" (OuterVolumeSpecName: "kube-api-access-4f58j") pod "b90df741-01e1-480d-89bf-040a0f6bef5f" (UID: "b90df741-01e1-480d-89bf-040a0f6bef5f"). InnerVolumeSpecName "kube-api-access-4f58j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.873172 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "39fa75e8-70c4-410e-b828-37eee9a1d63f" (UID: "39fa75e8-70c4-410e-b828-37eee9a1d63f"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.873219 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-config" (OuterVolumeSpecName: "config") pod "39fa75e8-70c4-410e-b828-37eee9a1d63f" (UID: "39fa75e8-70c4-410e-b828-37eee9a1d63f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.873501 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/39fa75e8-70c4-410e-b828-37eee9a1d63f-config-out" (OuterVolumeSpecName: "config-out") pod "39fa75e8-70c4-410e-b828-37eee9a1d63f" (UID: "39fa75e8-70c4-410e-b828-37eee9a1d63f"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.885724 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "39fa75e8-70c4-410e-b828-37eee9a1d63f" (UID: "39fa75e8-70c4-410e-b828-37eee9a1d63f"). InnerVolumeSpecName "pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.913858 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b90df741-01e1-480d-89bf-040a0f6bef5f-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "b90df741-01e1-480d-89bf-040a0f6bef5f" (UID: "b90df741-01e1-480d-89bf-040a0f6bef5f"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.926231 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-web-config" (OuterVolumeSpecName: "web-config") pod "39fa75e8-70c4-410e-b828-37eee9a1d63f" (UID: "39fa75e8-70c4-410e-b828-37eee9a1d63f"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.937751 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b90df741-01e1-480d-89bf-040a0f6bef5f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b90df741-01e1-480d-89bf-040a0f6bef5f" (UID: "b90df741-01e1-480d-89bf-040a0f6bef5f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.941215 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b90df741-01e1-480d-89bf-040a0f6bef5f-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "b90df741-01e1-480d-89bf-040a0f6bef5f" (UID: "b90df741-01e1-480d-89bf-040a0f6bef5f"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.970266 4932 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-web-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.970657 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b90df741-01e1-480d-89bf-040a0f6bef5f-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.970761 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b90df741-01e1-480d-89bf-040a0f6bef5f-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.970880 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b90df741-01e1-480d-89bf-040a0f6bef5f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.970983 4932 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\") on node \"crc\" " Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.971050 4932 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/39fa75e8-70c4-410e-b828-37eee9a1d63f-config-out\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.971132 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.971311 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4f58j\" (UniqueName: \"kubernetes.io/projected/b90df741-01e1-480d-89bf-040a0f6bef5f-kube-api-access-4f58j\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:14 crc kubenswrapper[4932]: I1125 10:37:14.971398 4932 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/39fa75e8-70c4-410e-b828-37eee9a1d63f-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.011543 4932 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.011735 4932 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71") on node "crc" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.072973 4932 reconciler_common.go:293] "Volume detached for volume \"pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.398008 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"39fa75e8-70c4-410e-b828-37eee9a1d63f","Type":"ContainerDied","Data":"dda8f7ab07b98042c2ccf86b9a73fc615c47002503e6d98ee4854a988de0dd99"} Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.398419 4932 scope.go:117] "RemoveContainer" containerID="0d4a60740bfae56789929dac56781b26df30ba4ec23c9e5f6b983cac3f931e7d" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.398070 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.400748 4932 generic.go:334] "Generic (PLEG): container finished" podID="b90df741-01e1-480d-89bf-040a0f6bef5f" containerID="0950276bddd6e5238bfa12a2f369eca1e7ee6977d66ae613a290e9b70fc6d6cc" exitCode=137 Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.400781 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.432461 4932 scope.go:117] "RemoveContainer" containerID="97550ed88fc51beee5f152521dfb6767671e19c9c1308ab47349030fdbc31f4f" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.438143 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.441030 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="b90df741-01e1-480d-89bf-040a0f6bef5f" podUID="f9e1331e-adee-4b7b-a604-6fac318106dc" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.447167 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.474352 4932 scope.go:117] "RemoveContainer" containerID="53aaa87d4cdf8040bf21065da8a0923b75e365b809321c0d42057556ce95aace" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.487288 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:37:15 crc kubenswrapper[4932]: E1125 10:37:15.487999 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="init-config-reloader" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.488089 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="init-config-reloader" Nov 25 10:37:15 crc kubenswrapper[4932]: E1125 10:37:15.488234 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="config-reloader" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.488332 4932 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="config-reloader" Nov 25 10:37:15 crc kubenswrapper[4932]: E1125 10:37:15.488413 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="thanos-sidecar" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.488486 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="thanos-sidecar" Nov 25 10:37:15 crc kubenswrapper[4932]: E1125 10:37:15.488568 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="prometheus" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.488637 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="prometheus" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.488915 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="thanos-sidecar" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.489060 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="prometheus" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.496709 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" containerName="config-reloader" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.497971 4932 scope.go:117] "RemoveContainer" containerID="ad69d8c7f6ece79945df981c66a5e2051396e1fba799bac7dc9b442b80096f40" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.499695 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.503379 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.504489 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.504671 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.505005 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-2zs26" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.508856 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.512261 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.531405 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.531468 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.562380 4932 scope.go:117] "RemoveContainer" containerID="0950276bddd6e5238bfa12a2f369eca1e7ee6977d66ae613a290e9b70fc6d6cc" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.583687 4932 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.583766 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.583808 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-config\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.583839 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a83f344e-7cbc-4fab-afb4-412918fe2681-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.583869 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.583905 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.583927 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a83f344e-7cbc-4fab-afb4-412918fe2681-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.583952 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.584080 4932 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dk9xj\" (UniqueName: \"kubernetes.io/projected/a83f344e-7cbc-4fab-afb4-412918fe2681-kube-api-access-dk9xj\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.584168 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a83f344e-7cbc-4fab-afb4-412918fe2681-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.584193 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.659592 4932 scope.go:117] "RemoveContainer" containerID="0950276bddd6e5238bfa12a2f369eca1e7ee6977d66ae613a290e9b70fc6d6cc" Nov 25 10:37:15 crc kubenswrapper[4932]: E1125 10:37:15.663601 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0950276bddd6e5238bfa12a2f369eca1e7ee6977d66ae613a290e9b70fc6d6cc\": container with ID starting with 0950276bddd6e5238bfa12a2f369eca1e7ee6977d66ae613a290e9b70fc6d6cc not found: ID does not exist" containerID="0950276bddd6e5238bfa12a2f369eca1e7ee6977d66ae613a290e9b70fc6d6cc" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.663637 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0950276bddd6e5238bfa12a2f369eca1e7ee6977d66ae613a290e9b70fc6d6cc"} err="failed to get container status \"0950276bddd6e5238bfa12a2f369eca1e7ee6977d66ae613a290e9b70fc6d6cc\": rpc error: code = NotFound desc = could not find container \"0950276bddd6e5238bfa12a2f369eca1e7ee6977d66ae613a290e9b70fc6d6cc\": container with ID starting with 0950276bddd6e5238bfa12a2f369eca1e7ee6977d66ae613a290e9b70fc6d6cc not found: ID does not exist" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.687545 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dk9xj\" (UniqueName: \"kubernetes.io/projected/a83f344e-7cbc-4fab-afb4-412918fe2681-kube-api-access-dk9xj\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.687639 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a83f344e-7cbc-4fab-afb4-412918fe2681-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.687661 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " 
pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.687716 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.687754 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.687781 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-config\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.687812 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a83f344e-7cbc-4fab-afb4-412918fe2681-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.687834 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.687868 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.687898 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a83f344e-7cbc-4fab-afb4-412918fe2681-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.687915 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.689268 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/a83f344e-7cbc-4fab-afb4-412918fe2681-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.700038 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/a83f344e-7cbc-4fab-afb4-412918fe2681-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.717987 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.719726 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-config\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.720591 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.721285 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.723895 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/a83f344e-7cbc-4fab-afb4-412918fe2681-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.723919 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.737365 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/a83f344e-7cbc-4fab-afb4-412918fe2681-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc 
kubenswrapper[4932]: I1125 10:37:15.738338 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dk9xj\" (UniqueName: \"kubernetes.io/projected/a83f344e-7cbc-4fab-afb4-412918fe2681-kube-api-access-dk9xj\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.752922 4932 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.752968 4932 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/9ea084770c02ed5a7ec854914453e15e6a92564817ab4fa949bbc517dd437f23/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.800884 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e8c7a23f-9f40-40d6-884c-a1ed9accca71\") pod \"prometheus-metric-storage-0\" (UID: \"a83f344e-7cbc-4fab-afb4-412918fe2681\") " pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:15 crc kubenswrapper[4932]: I1125 10:37:15.907246 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.433484 4932 scope.go:117] "RemoveContainer" containerID="6ade566e33724ea28b86f8bca09d1a9fb9ed2e2ad700d8545ecf2f63326764fc" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.462736 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.471005 4932 scope.go:117] "RemoveContainer" containerID="fa926cda7411684699e7acc49541bf3b4bb27e2b69ac73b93528fd33617bfe98" Nov 25 10:37:16 crc kubenswrapper[4932]: W1125 10:37:16.477719 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda83f344e_7cbc_4fab_afb4_412918fe2681.slice/crio-81113071796bc21cbc53649274540721948d5d502ade9a8613bc99590b1b05a3 WatchSource:0}: Error finding container 81113071796bc21cbc53649274540721948d5d502ade9a8613bc99590b1b05a3: Status 404 returned error can't find the container with id 81113071796bc21cbc53649274540721948d5d502ade9a8613bc99590b1b05a3 Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.505796 4932 scope.go:117] "RemoveContainer" containerID="7430ee6826b13766c94c4977758e114cf9a05c0c37e8918bcaa5526205b1047d" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.541153 4932 scope.go:117] "RemoveContainer" containerID="c1271bbd203bf128ebe6560aba8b9d8537f76fd3a43378f19caae83de8afc6cb" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.578162 4932 scope.go:117] "RemoveContainer" containerID="6f5808e023f540eeccb7882ca7e1bfc802f49e825ec3765724bdfc77d6470345" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.640524 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39fa75e8-70c4-410e-b828-37eee9a1d63f" 
path="/var/lib/kubelet/pods/39fa75e8-70c4-410e-b828-37eee9a1d63f/volumes" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.641992 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b90df741-01e1-480d-89bf-040a0f6bef5f" path="/var/lib/kubelet/pods/b90df741-01e1-480d-89bf-040a0f6bef5f/volumes" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.642583 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.646014 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.646918 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.648075 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.648409 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.724867 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.724923 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69d728ff-917d-4977-9da7-0c3a316cc805-run-httpd\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.724964 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46tph\" (UniqueName: \"kubernetes.io/projected/69d728ff-917d-4977-9da7-0c3a316cc805-kube-api-access-46tph\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.725015 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-scripts\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.725047 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-config-data\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.725141 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69d728ff-917d-4977-9da7-0c3a316cc805-log-httpd\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.725166 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.827163 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69d728ff-917d-4977-9da7-0c3a316cc805-log-httpd\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.827233 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.827259 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.827285 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69d728ff-917d-4977-9da7-0c3a316cc805-run-httpd\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.827325 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46tph\" (UniqueName: \"kubernetes.io/projected/69d728ff-917d-4977-9da7-0c3a316cc805-kube-api-access-46tph\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.827383 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-scripts\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.827417 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-config-data\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.827628 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69d728ff-917d-4977-9da7-0c3a316cc805-log-httpd\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.827990 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69d728ff-917d-4977-9da7-0c3a316cc805-run-httpd\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.833301 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.833325 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-scripts\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.833411 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-config-data\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.833904 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.848123 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46tph\" (UniqueName: \"kubernetes.io/projected/69d728ff-917d-4977-9da7-0c3a316cc805-kube-api-access-46tph\") pod \"ceilometer-0\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " pod="openstack/ceilometer-0" Nov 25 10:37:16 crc kubenswrapper[4932]: I1125 10:37:16.972811 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:37:17 crc kubenswrapper[4932]: I1125 10:37:17.428141 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a83f344e-7cbc-4fab-afb4-412918fe2681","Type":"ContainerStarted","Data":"81113071796bc21cbc53649274540721948d5d502ade9a8613bc99590b1b05a3"} Nov 25 10:37:17 crc kubenswrapper[4932]: I1125 10:37:17.464724 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:37:17 crc kubenswrapper[4932]: W1125 10:37:17.467734 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod69d728ff_917d_4977_9da7_0c3a316cc805.slice/crio-ea22416e33dde95b6602b82638b75e9d2ebba02b88b95f2418624be16745cea7 WatchSource:0}: Error finding container ea22416e33dde95b6602b82638b75e9d2ebba02b88b95f2418624be16745cea7: Status 404 returned error can't find the container with id ea22416e33dde95b6602b82638b75e9d2ebba02b88b95f2418624be16745cea7 Nov 25 10:37:18 crc kubenswrapper[4932]: I1125 10:37:18.438561 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69d728ff-917d-4977-9da7-0c3a316cc805","Type":"ContainerStarted","Data":"ea22416e33dde95b6602b82638b75e9d2ebba02b88b95f2418624be16745cea7"} Nov 25 10:37:20 crc kubenswrapper[4932]: I1125 10:37:20.463259 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69d728ff-917d-4977-9da7-0c3a316cc805","Type":"ContainerStarted","Data":"d9529193b0a3c993177a1640ba61e348561be03f6ac16b92746a9bce5668a719"} Nov 25 10:37:20 crc kubenswrapper[4932]: I1125 10:37:20.465824 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"a83f344e-7cbc-4fab-afb4-412918fe2681","Type":"ContainerStarted","Data":"a8cf7a192dead1716fbe70cd29c3726a583f9ec424a516665ddbb352f9e1b9c1"} Nov 25 10:37:21 crc kubenswrapper[4932]: I1125 10:37:21.482355 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69d728ff-917d-4977-9da7-0c3a316cc805","Type":"ContainerStarted","Data":"81d86de699eed7a1b634835af1689ff6f77a6ec41d70ec22f34cb75ed668d0cd"} Nov 25 10:37:22 crc kubenswrapper[4932]: I1125 10:37:22.494018 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69d728ff-917d-4977-9da7-0c3a316cc805","Type":"ContainerStarted","Data":"74b60317958a6af423b1606313d4cebdae9b55b624f1efa0460a33724029542c"} Nov 25 10:37:24 crc kubenswrapper[4932]: I1125 10:37:24.606722 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:37:24 crc kubenswrapper[4932]: E1125 10:37:24.607559 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:37:25 crc kubenswrapper[4932]: I1125 10:37:25.524654 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69d728ff-917d-4977-9da7-0c3a316cc805","Type":"ContainerStarted","Data":"7d63b8d9ae9937e20f2a176371fc340c3e16c9f48c83102129f8fa1a093eeb77"} Nov 25 10:37:25 crc kubenswrapper[4932]: I1125 10:37:25.524979 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 10:37:25 crc kubenswrapper[4932]: I1125 10:37:25.545924 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.332432004 podStartE2EDuration="9.545900693s" podCreationTimestamp="2025-11-25 10:37:16 +0000 UTC" firstStartedPulling="2025-11-25 10:37:17.470176164 +0000 UTC m=+6497.596205717" lastFinishedPulling="2025-11-25 10:37:24.683644843 +0000 UTC m=+6504.809674406" observedRunningTime="2025-11-25 10:37:25.543934337 +0000 UTC m=+6505.669963900" watchObservedRunningTime="2025-11-25 10:37:25.545900693 +0000 UTC m=+6505.671930256" Nov 25 10:37:28 crc kubenswrapper[4932]: I1125 10:37:28.559088 4932 generic.go:334] "Generic (PLEG): container finished" podID="a83f344e-7cbc-4fab-afb4-412918fe2681" containerID="a8cf7a192dead1716fbe70cd29c3726a583f9ec424a516665ddbb352f9e1b9c1" exitCode=0 Nov 25 10:37:28 crc kubenswrapper[4932]: I1125 10:37:28.559174 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a83f344e-7cbc-4fab-afb4-412918fe2681","Type":"ContainerDied","Data":"a8cf7a192dead1716fbe70cd29c3726a583f9ec424a516665ddbb352f9e1b9c1"} Nov 25 10:37:29 crc kubenswrapper[4932]: I1125 10:37:29.574331 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a83f344e-7cbc-4fab-afb4-412918fe2681","Type":"ContainerStarted","Data":"39cff16b43584e5552c261b9881da99adede21f8390d5a5cc4a1ea6566e538a9"} Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.148888 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-zh8rp"] Nov 25 10:37:30 crc 
kubenswrapper[4932]: I1125 10:37:30.150681 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-zh8rp" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.180356 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-zh8rp"] Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.232675 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/428eabdc-4f44-4b96-b4d0-3ab48106d1ed-operator-scripts\") pod \"aodh-db-create-zh8rp\" (UID: \"428eabdc-4f44-4b96-b4d0-3ab48106d1ed\") " pod="openstack/aodh-db-create-zh8rp" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.233016 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrgtq\" (UniqueName: \"kubernetes.io/projected/428eabdc-4f44-4b96-b4d0-3ab48106d1ed-kube-api-access-vrgtq\") pod \"aodh-db-create-zh8rp\" (UID: \"428eabdc-4f44-4b96-b4d0-3ab48106d1ed\") " pod="openstack/aodh-db-create-zh8rp" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.335230 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/428eabdc-4f44-4b96-b4d0-3ab48106d1ed-operator-scripts\") pod \"aodh-db-create-zh8rp\" (UID: \"428eabdc-4f44-4b96-b4d0-3ab48106d1ed\") " pod="openstack/aodh-db-create-zh8rp" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.335476 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrgtq\" (UniqueName: \"kubernetes.io/projected/428eabdc-4f44-4b96-b4d0-3ab48106d1ed-kube-api-access-vrgtq\") pod \"aodh-db-create-zh8rp\" (UID: \"428eabdc-4f44-4b96-b4d0-3ab48106d1ed\") " pod="openstack/aodh-db-create-zh8rp" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.336262 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/428eabdc-4f44-4b96-b4d0-3ab48106d1ed-operator-scripts\") pod \"aodh-db-create-zh8rp\" (UID: \"428eabdc-4f44-4b96-b4d0-3ab48106d1ed\") " pod="openstack/aodh-db-create-zh8rp" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.354167 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-8008-account-create-x6zwq"] Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.359607 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrgtq\" (UniqueName: \"kubernetes.io/projected/428eabdc-4f44-4b96-b4d0-3ab48106d1ed-kube-api-access-vrgtq\") pod \"aodh-db-create-zh8rp\" (UID: \"428eabdc-4f44-4b96-b4d0-3ab48106d1ed\") " pod="openstack/aodh-db-create-zh8rp" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.359831 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-8008-account-create-x6zwq" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.363561 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.373913 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-8008-account-create-x6zwq"] Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.437681 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/229934ff-1f5d-4203-a3e8-93c0cc404320-operator-scripts\") pod \"aodh-8008-account-create-x6zwq\" (UID: \"229934ff-1f5d-4203-a3e8-93c0cc404320\") " pod="openstack/aodh-8008-account-create-x6zwq" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.437915 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fvq8\" (UniqueName: \"kubernetes.io/projected/229934ff-1f5d-4203-a3e8-93c0cc404320-kube-api-access-8fvq8\") pod \"aodh-8008-account-create-x6zwq\" (UID: \"229934ff-1f5d-4203-a3e8-93c0cc404320\") " pod="openstack/aodh-8008-account-create-x6zwq" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.482937 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-zh8rp" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.546593 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/229934ff-1f5d-4203-a3e8-93c0cc404320-operator-scripts\") pod \"aodh-8008-account-create-x6zwq\" (UID: \"229934ff-1f5d-4203-a3e8-93c0cc404320\") " pod="openstack/aodh-8008-account-create-x6zwq" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.547090 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fvq8\" (UniqueName: \"kubernetes.io/projected/229934ff-1f5d-4203-a3e8-93c0cc404320-kube-api-access-8fvq8\") pod \"aodh-8008-account-create-x6zwq\" (UID: \"229934ff-1f5d-4203-a3e8-93c0cc404320\") " pod="openstack/aodh-8008-account-create-x6zwq" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.547357 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/229934ff-1f5d-4203-a3e8-93c0cc404320-operator-scripts\") pod \"aodh-8008-account-create-x6zwq\" (UID: \"229934ff-1f5d-4203-a3e8-93c0cc404320\") " pod="openstack/aodh-8008-account-create-x6zwq" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.571792 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fvq8\" (UniqueName: \"kubernetes.io/projected/229934ff-1f5d-4203-a3e8-93c0cc404320-kube-api-access-8fvq8\") pod \"aodh-8008-account-create-x6zwq\" (UID: \"229934ff-1f5d-4203-a3e8-93c0cc404320\") " pod="openstack/aodh-8008-account-create-x6zwq" Nov 25 10:37:30 crc kubenswrapper[4932]: I1125 10:37:30.741038 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-8008-account-create-x6zwq" Nov 25 10:37:31 crc kubenswrapper[4932]: I1125 10:37:31.032549 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-zh8rp"] Nov 25 10:37:31 crc kubenswrapper[4932]: I1125 10:37:31.298045 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-8008-account-create-x6zwq"] Nov 25 10:37:31 crc kubenswrapper[4932]: I1125 10:37:31.594999 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-8008-account-create-x6zwq" event={"ID":"229934ff-1f5d-4203-a3e8-93c0cc404320","Type":"ContainerStarted","Data":"7837daee626fa7a39ac5313966122fd17d3d14d9b8107049b3e73ed50e7b0add"} Nov 25 10:37:31 crc kubenswrapper[4932]: I1125 10:37:31.596535 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-zh8rp" event={"ID":"428eabdc-4f44-4b96-b4d0-3ab48106d1ed","Type":"ContainerStarted","Data":"97856b3c9d38270e47c55bd36dac9a89dbcbacd6750dc48e7dce2b2d4f731149"} Nov 25 10:37:32 crc kubenswrapper[4932]: I1125 10:37:32.631688 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-8008-account-create-x6zwq" podStartSLOduration=2.631669978 podStartE2EDuration="2.631669978s" podCreationTimestamp="2025-11-25 10:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:37:32.622590199 +0000 UTC m=+6512.748619782" watchObservedRunningTime="2025-11-25 10:37:32.631669978 +0000 UTC m=+6512.757699541" Nov 25 10:37:32 crc kubenswrapper[4932]: I1125 10:37:32.655102 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-8008-account-create-x6zwq" event={"ID":"229934ff-1f5d-4203-a3e8-93c0cc404320","Type":"ContainerStarted","Data":"f435db27f64f5df74736326a7391f4e1cc51d59432c812b74ec11f1022b98e14"} Nov 25 10:37:32 crc kubenswrapper[4932]: I1125 10:37:32.655172 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a83f344e-7cbc-4fab-afb4-412918fe2681","Type":"ContainerStarted","Data":"65a8b49c16252e8a634bd74528fdd0b3d763cb5c99ececc72189a3e5c0146851"} Nov 25 10:37:32 crc kubenswrapper[4932]: I1125 10:37:32.655206 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-zh8rp" event={"ID":"428eabdc-4f44-4b96-b4d0-3ab48106d1ed","Type":"ContainerStarted","Data":"d48749efb5ed5e823ceab5e992d8b2a2f2deb791ad6379dbedf9b6c8afc76988"} Nov 25 10:37:32 crc kubenswrapper[4932]: I1125 10:37:32.663972 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-zh8rp" podStartSLOduration=2.663953152 podStartE2EDuration="2.663953152s" podCreationTimestamp="2025-11-25 10:37:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:37:32.640442779 +0000 UTC m=+6512.766472352" watchObservedRunningTime="2025-11-25 10:37:32.663953152 +0000 UTC m=+6512.789982715" Nov 25 10:37:33 crc kubenswrapper[4932]: I1125 10:37:33.625340 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"a83f344e-7cbc-4fab-afb4-412918fe2681","Type":"ContainerStarted","Data":"07a485e2c45240ecf9767ab537254a2f3e06476f1ac7fac9fd8fc2532b7db323"} Nov 25 10:37:33 crc kubenswrapper[4932]: I1125 10:37:33.627013 4932 generic.go:334] "Generic (PLEG): container finished" 
podID="428eabdc-4f44-4b96-b4d0-3ab48106d1ed" containerID="d48749efb5ed5e823ceab5e992d8b2a2f2deb791ad6379dbedf9b6c8afc76988" exitCode=0 Nov 25 10:37:33 crc kubenswrapper[4932]: I1125 10:37:33.627077 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-zh8rp" event={"ID":"428eabdc-4f44-4b96-b4d0-3ab48106d1ed","Type":"ContainerDied","Data":"d48749efb5ed5e823ceab5e992d8b2a2f2deb791ad6379dbedf9b6c8afc76988"} Nov 25 10:37:33 crc kubenswrapper[4932]: I1125 10:37:33.629117 4932 generic.go:334] "Generic (PLEG): container finished" podID="229934ff-1f5d-4203-a3e8-93c0cc404320" containerID="f435db27f64f5df74736326a7391f4e1cc51d59432c812b74ec11f1022b98e14" exitCode=0 Nov 25 10:37:33 crc kubenswrapper[4932]: I1125 10:37:33.629170 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-8008-account-create-x6zwq" event={"ID":"229934ff-1f5d-4203-a3e8-93c0cc404320","Type":"ContainerDied","Data":"f435db27f64f5df74736326a7391f4e1cc51d59432c812b74ec11f1022b98e14"} Nov 25 10:37:33 crc kubenswrapper[4932]: I1125 10:37:33.656933 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=18.656906932 podStartE2EDuration="18.656906932s" podCreationTimestamp="2025-11-25 10:37:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:37:33.655837272 +0000 UTC m=+6513.781866885" watchObservedRunningTime="2025-11-25 10:37:33.656906932 +0000 UTC m=+6513.782936505" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.124547 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-8008-account-create-x6zwq" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.131427 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-zh8rp" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.250563 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/428eabdc-4f44-4b96-b4d0-3ab48106d1ed-operator-scripts\") pod \"428eabdc-4f44-4b96-b4d0-3ab48106d1ed\" (UID: \"428eabdc-4f44-4b96-b4d0-3ab48106d1ed\") " Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.250624 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/229934ff-1f5d-4203-a3e8-93c0cc404320-operator-scripts\") pod \"229934ff-1f5d-4203-a3e8-93c0cc404320\" (UID: \"229934ff-1f5d-4203-a3e8-93c0cc404320\") " Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.250650 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrgtq\" (UniqueName: \"kubernetes.io/projected/428eabdc-4f44-4b96-b4d0-3ab48106d1ed-kube-api-access-vrgtq\") pod \"428eabdc-4f44-4b96-b4d0-3ab48106d1ed\" (UID: \"428eabdc-4f44-4b96-b4d0-3ab48106d1ed\") " Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.250722 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fvq8\" (UniqueName: \"kubernetes.io/projected/229934ff-1f5d-4203-a3e8-93c0cc404320-kube-api-access-8fvq8\") pod \"229934ff-1f5d-4203-a3e8-93c0cc404320\" (UID: \"229934ff-1f5d-4203-a3e8-93c0cc404320\") " Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.251416 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/428eabdc-4f44-4b96-b4d0-3ab48106d1ed-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "428eabdc-4f44-4b96-b4d0-3ab48106d1ed" (UID: "428eabdc-4f44-4b96-b4d0-3ab48106d1ed"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.251458 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/229934ff-1f5d-4203-a3e8-93c0cc404320-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "229934ff-1f5d-4203-a3e8-93c0cc404320" (UID: "229934ff-1f5d-4203-a3e8-93c0cc404320"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.252661 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/428eabdc-4f44-4b96-b4d0-3ab48106d1ed-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.252689 4932 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/229934ff-1f5d-4203-a3e8-93c0cc404320-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.257518 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/428eabdc-4f44-4b96-b4d0-3ab48106d1ed-kube-api-access-vrgtq" (OuterVolumeSpecName: "kube-api-access-vrgtq") pod "428eabdc-4f44-4b96-b4d0-3ab48106d1ed" (UID: "428eabdc-4f44-4b96-b4d0-3ab48106d1ed"). InnerVolumeSpecName "kube-api-access-vrgtq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.257880 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/229934ff-1f5d-4203-a3e8-93c0cc404320-kube-api-access-8fvq8" (OuterVolumeSpecName: "kube-api-access-8fvq8") pod "229934ff-1f5d-4203-a3e8-93c0cc404320" (UID: "229934ff-1f5d-4203-a3e8-93c0cc404320"). InnerVolumeSpecName "kube-api-access-8fvq8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.355431 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrgtq\" (UniqueName: \"kubernetes.io/projected/428eabdc-4f44-4b96-b4d0-3ab48106d1ed-kube-api-access-vrgtq\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.355619 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fvq8\" (UniqueName: \"kubernetes.io/projected/229934ff-1f5d-4203-a3e8-93c0cc404320-kube-api-access-8fvq8\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.651695 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-zh8rp" event={"ID":"428eabdc-4f44-4b96-b4d0-3ab48106d1ed","Type":"ContainerDied","Data":"97856b3c9d38270e47c55bd36dac9a89dbcbacd6750dc48e7dce2b2d4f731149"} Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.652006 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97856b3c9d38270e47c55bd36dac9a89dbcbacd6750dc48e7dce2b2d4f731149" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.651904 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-zh8rp" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.653772 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-8008-account-create-x6zwq" event={"ID":"229934ff-1f5d-4203-a3e8-93c0cc404320","Type":"ContainerDied","Data":"7837daee626fa7a39ac5313966122fd17d3d14d9b8107049b3e73ed50e7b0add"} Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.653799 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7837daee626fa7a39ac5313966122fd17d3d14d9b8107049b3e73ed50e7b0add" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.653867 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-8008-account-create-x6zwq" Nov 25 10:37:35 crc kubenswrapper[4932]: E1125 10:37:35.785346 4932 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod428eabdc_4f44_4b96_b4d0_3ab48106d1ed.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod428eabdc_4f44_4b96_b4d0_3ab48106d1ed.slice/crio-97856b3c9d38270e47c55bd36dac9a89dbcbacd6750dc48e7dce2b2d4f731149\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod229934ff_1f5d_4203_a3e8_93c0cc404320.slice/crio-7837daee626fa7a39ac5313966122fd17d3d14d9b8107049b3e73ed50e7b0add\": RecentStats: unable to find data in memory cache]" Nov 25 10:37:35 crc kubenswrapper[4932]: I1125 10:37:35.908313 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:39 crc kubenswrapper[4932]: I1125 10:37:39.605863 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:37:39 crc kubenswrapper[4932]: E1125 10:37:39.606837 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.718857 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-jcfxx"] Nov 25 10:37:40 crc kubenswrapper[4932]: E1125 10:37:40.719732 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="428eabdc-4f44-4b96-b4d0-3ab48106d1ed" containerName="mariadb-database-create" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.719752 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="428eabdc-4f44-4b96-b4d0-3ab48106d1ed" containerName="mariadb-database-create" Nov 25 10:37:40 crc kubenswrapper[4932]: E1125 10:37:40.719804 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="229934ff-1f5d-4203-a3e8-93c0cc404320" containerName="mariadb-account-create" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.719814 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="229934ff-1f5d-4203-a3e8-93c0cc404320" containerName="mariadb-account-create" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.720097 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="428eabdc-4f44-4b96-b4d0-3ab48106d1ed" containerName="mariadb-database-create" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.720112 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="229934ff-1f5d-4203-a3e8-93c0cc404320" containerName="mariadb-account-create" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.721104 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.722855 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.727151 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-d5glh" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.727301 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.727398 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.734259 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-jcfxx"] Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.779634 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-config-data\") pod \"aodh-db-sync-jcfxx\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.779767 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-combined-ca-bundle\") pod \"aodh-db-sync-jcfxx\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.779838 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z8tc\" (UniqueName: \"kubernetes.io/projected/4ad43add-2f06-42b9-9802-9eb44383b894-kube-api-access-8z8tc\") pod \"aodh-db-sync-jcfxx\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.779875 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-scripts\") pod \"aodh-db-sync-jcfxx\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.881694 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-combined-ca-bundle\") pod \"aodh-db-sync-jcfxx\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.881811 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z8tc\" (UniqueName: \"kubernetes.io/projected/4ad43add-2f06-42b9-9802-9eb44383b894-kube-api-access-8z8tc\") pod \"aodh-db-sync-jcfxx\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.881852 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-scripts\") pod \"aodh-db-sync-jcfxx\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:40 crc 
kubenswrapper[4932]: I1125 10:37:40.882013 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-config-data\") pod \"aodh-db-sync-jcfxx\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.891730 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-config-data\") pod \"aodh-db-sync-jcfxx\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.897527 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-scripts\") pod \"aodh-db-sync-jcfxx\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.897930 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-combined-ca-bundle\") pod \"aodh-db-sync-jcfxx\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:40 crc kubenswrapper[4932]: I1125 10:37:40.925897 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z8tc\" (UniqueName: \"kubernetes.io/projected/4ad43add-2f06-42b9-9802-9eb44383b894-kube-api-access-8z8tc\") pod \"aodh-db-sync-jcfxx\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:41 crc kubenswrapper[4932]: I1125 10:37:41.051076 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:41 crc kubenswrapper[4932]: I1125 10:37:41.552668 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-jcfxx"] Nov 25 10:37:41 crc kubenswrapper[4932]: W1125 10:37:41.555329 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4ad43add_2f06_42b9_9802_9eb44383b894.slice/crio-67084a4ae577aad1c4af7d510e68232c25306cae8a8e93237e64ef539649d48a WatchSource:0}: Error finding container 67084a4ae577aad1c4af7d510e68232c25306cae8a8e93237e64ef539649d48a: Status 404 returned error can't find the container with id 67084a4ae577aad1c4af7d510e68232c25306cae8a8e93237e64ef539649d48a Nov 25 10:37:41 crc kubenswrapper[4932]: I1125 10:37:41.710921 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-jcfxx" event={"ID":"4ad43add-2f06-42b9-9802-9eb44383b894","Type":"ContainerStarted","Data":"67084a4ae577aad1c4af7d510e68232c25306cae8a8e93237e64ef539649d48a"} Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.049527 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-3891-account-create-8qvzt"] Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.062213 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-db76q"] Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.073308 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-c672-account-create-npp8v"] Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.081656 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-ljdqm"] Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.090181 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-fw5g6"] Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.098335 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-3179-account-create-fznsx"] Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.106430 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-db76q"] Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.128836 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-ljdqm"] Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.135756 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-3891-account-create-8qvzt"] Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.147467 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-fw5g6"] Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.157610 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-c672-account-create-npp8v"] Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.168434 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-3179-account-create-fznsx"] Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.618205 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b647685-9f5e-4dcb-a0a8-4739ecfb6330" path="/var/lib/kubelet/pods/9b647685-9f5e-4dcb-a0a8-4739ecfb6330/volumes" Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.619337 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5418610-a8c5-42e4-a5df-14af19703a42" 
path="/var/lib/kubelet/pods/a5418610-a8c5-42e4-a5df-14af19703a42/volumes" Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.620104 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aeb1135f-f2d3-4259-ae52-39f9df4a5582" path="/var/lib/kubelet/pods/aeb1135f-f2d3-4259-ae52-39f9df4a5582/volumes" Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.620851 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd469507-d0a9-4a7d-9a60-2dd43e09c8a8" path="/var/lib/kubelet/pods/bd469507-d0a9-4a7d-9a60-2dd43e09c8a8/volumes" Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.622149 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c76fa982-897c-4948-8ad7-a111c8399e74" path="/var/lib/kubelet/pods/c76fa982-897c-4948-8ad7-a111c8399e74/volumes" Nov 25 10:37:44 crc kubenswrapper[4932]: I1125 10:37:44.659795 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb900453-5ad2-4591-a371-59be9fe5f5f4" path="/var/lib/kubelet/pods/eb900453-5ad2-4591-a371-59be9fe5f5f4/volumes" Nov 25 10:37:45 crc kubenswrapper[4932]: I1125 10:37:45.763910 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-jcfxx" event={"ID":"4ad43add-2f06-42b9-9802-9eb44383b894","Type":"ContainerStarted","Data":"796d85dd018fed20032b03e569aa651a6d8471f86844e249e7c4510321a8af65"} Nov 25 10:37:45 crc kubenswrapper[4932]: I1125 10:37:45.802696 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-jcfxx" podStartSLOduration=2.056900701 podStartE2EDuration="5.802668703s" podCreationTimestamp="2025-11-25 10:37:40 +0000 UTC" firstStartedPulling="2025-11-25 10:37:41.556868744 +0000 UTC m=+6521.682898297" lastFinishedPulling="2025-11-25 10:37:45.302636736 +0000 UTC m=+6525.428666299" observedRunningTime="2025-11-25 10:37:45.791448342 +0000 UTC m=+6525.917477935" watchObservedRunningTime="2025-11-25 10:37:45.802668703 +0000 UTC m=+6525.928698296" Nov 25 10:37:45 crc kubenswrapper[4932]: I1125 10:37:45.907999 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:45 crc kubenswrapper[4932]: I1125 10:37:45.915156 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:46 crc kubenswrapper[4932]: I1125 10:37:46.779850 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 25 10:37:46 crc kubenswrapper[4932]: I1125 10:37:46.979436 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 10:37:47 crc kubenswrapper[4932]: I1125 10:37:47.791117 4932 generic.go:334] "Generic (PLEG): container finished" podID="4ad43add-2f06-42b9-9802-9eb44383b894" containerID="796d85dd018fed20032b03e569aa651a6d8471f86844e249e7c4510321a8af65" exitCode=0 Nov 25 10:37:47 crc kubenswrapper[4932]: I1125 10:37:47.791220 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-jcfxx" event={"ID":"4ad43add-2f06-42b9-9802-9eb44383b894","Type":"ContainerDied","Data":"796d85dd018fed20032b03e569aa651a6d8471f86844e249e7c4510321a8af65"} Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.198029 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.263280 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-config-data\") pod \"4ad43add-2f06-42b9-9802-9eb44383b894\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.263541 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-combined-ca-bundle\") pod \"4ad43add-2f06-42b9-9802-9eb44383b894\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.263567 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8z8tc\" (UniqueName: \"kubernetes.io/projected/4ad43add-2f06-42b9-9802-9eb44383b894-kube-api-access-8z8tc\") pod \"4ad43add-2f06-42b9-9802-9eb44383b894\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.263586 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-scripts\") pod \"4ad43add-2f06-42b9-9802-9eb44383b894\" (UID: \"4ad43add-2f06-42b9-9802-9eb44383b894\") " Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.280347 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-scripts" (OuterVolumeSpecName: "scripts") pod "4ad43add-2f06-42b9-9802-9eb44383b894" (UID: "4ad43add-2f06-42b9-9802-9eb44383b894"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.287949 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ad43add-2f06-42b9-9802-9eb44383b894-kube-api-access-8z8tc" (OuterVolumeSpecName: "kube-api-access-8z8tc") pod "4ad43add-2f06-42b9-9802-9eb44383b894" (UID: "4ad43add-2f06-42b9-9802-9eb44383b894"). InnerVolumeSpecName "kube-api-access-8z8tc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.293326 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-config-data" (OuterVolumeSpecName: "config-data") pod "4ad43add-2f06-42b9-9802-9eb44383b894" (UID: "4ad43add-2f06-42b9-9802-9eb44383b894"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.294736 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4ad43add-2f06-42b9-9802-9eb44383b894" (UID: "4ad43add-2f06-42b9-9802-9eb44383b894"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.365748 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.365787 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8z8tc\" (UniqueName: \"kubernetes.io/projected/4ad43add-2f06-42b9-9802-9eb44383b894-kube-api-access-8z8tc\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.365799 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.365808 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4ad43add-2f06-42b9-9802-9eb44383b894-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.816688 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-jcfxx" event={"ID":"4ad43add-2f06-42b9-9802-9eb44383b894","Type":"ContainerDied","Data":"67084a4ae577aad1c4af7d510e68232c25306cae8a8e93237e64ef539649d48a"} Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.817046 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="67084a4ae577aad1c4af7d510e68232c25306cae8a8e93237e64ef539649d48a" Nov 25 10:37:49 crc kubenswrapper[4932]: I1125 10:37:49.817114 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-jcfxx" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.279727 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 25 10:37:50 crc kubenswrapper[4932]: E1125 10:37:50.281025 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ad43add-2f06-42b9-9802-9eb44383b894" containerName="aodh-db-sync" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.281136 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ad43add-2f06-42b9-9802-9eb44383b894" containerName="aodh-db-sync" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.281545 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ad43add-2f06-42b9-9802-9eb44383b894" containerName="aodh-db-sync" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.283906 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.287150 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.287288 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.289119 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.290164 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-d5glh" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.385755 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-combined-ca-bundle\") pod \"aodh-0\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.385876 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzgw8\" (UniqueName: \"kubernetes.io/projected/ee193c35-f11b-4708-8d29-0905452cd59e-kube-api-access-kzgw8\") pod \"aodh-0\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.385918 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-scripts\") pod \"aodh-0\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.385985 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-config-data\") pod \"aodh-0\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.488505 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-config-data\") pod \"aodh-0\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.488736 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-combined-ca-bundle\") pod \"aodh-0\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.488867 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzgw8\" (UniqueName: \"kubernetes.io/projected/ee193c35-f11b-4708-8d29-0905452cd59e-kube-api-access-kzgw8\") pod \"aodh-0\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.488930 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-scripts\") pod \"aodh-0\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: 
I1125 10:37:50.496019 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-combined-ca-bundle\") pod \"aodh-0\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.498788 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-scripts\") pod \"aodh-0\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.508832 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-config-data\") pod \"aodh-0\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.511345 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzgw8\" (UniqueName: \"kubernetes.io/projected/ee193c35-f11b-4708-8d29-0905452cd59e-kube-api-access-kzgw8\") pod \"aodh-0\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " pod="openstack/aodh-0" Nov 25 10:37:50 crc kubenswrapper[4932]: I1125 10:37:50.606296 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:37:51 crc kubenswrapper[4932]: I1125 10:37:51.124070 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 25 10:37:51 crc kubenswrapper[4932]: W1125 10:37:51.128278 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee193c35_f11b_4708_8d29_0905452cd59e.slice/crio-7e85f7138473b8835317fa52bd28d29c0abdf2968fa5c578c6deadca4a2164a2 WatchSource:0}: Error finding container 7e85f7138473b8835317fa52bd28d29c0abdf2968fa5c578c6deadca4a2164a2: Status 404 returned error can't find the container with id 7e85f7138473b8835317fa52bd28d29c0abdf2968fa5c578c6deadca4a2164a2 Nov 25 10:37:51 crc kubenswrapper[4932]: I1125 10:37:51.841306 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ee193c35-f11b-4708-8d29-0905452cd59e","Type":"ContainerStarted","Data":"7e85f7138473b8835317fa52bd28d29c0abdf2968fa5c578c6deadca4a2164a2"} Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.317776 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.318029 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="ceilometer-central-agent" containerID="cri-o://d9529193b0a3c993177a1640ba61e348561be03f6ac16b92746a9bce5668a719" gracePeriod=30 Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.318118 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="sg-core" containerID="cri-o://74b60317958a6af423b1606313d4cebdae9b55b624f1efa0460a33724029542c" gracePeriod=30 Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.318144 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="ceilometer-notification-agent" 
containerID="cri-o://81d86de699eed7a1b634835af1689ff6f77a6ec41d70ec22f34cb75ed668d0cd" gracePeriod=30 Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.318104 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="proxy-httpd" containerID="cri-o://7d63b8d9ae9937e20f2a176371fc340c3e16c9f48c83102129f8fa1a093eeb77" gracePeriod=30 Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.607173 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:37:52 crc kubenswrapper[4932]: E1125 10:37:52.607630 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.853786 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ee193c35-f11b-4708-8d29-0905452cd59e","Type":"ContainerStarted","Data":"e094faf54dfa874834aad777d22318cd6bdeee470c2983839e02979549e750cd"} Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.858969 4932 generic.go:334] "Generic (PLEG): container finished" podID="69d728ff-917d-4977-9da7-0c3a316cc805" containerID="7d63b8d9ae9937e20f2a176371fc340c3e16c9f48c83102129f8fa1a093eeb77" exitCode=0 Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.859003 4932 generic.go:334] "Generic (PLEG): container finished" podID="69d728ff-917d-4977-9da7-0c3a316cc805" containerID="74b60317958a6af423b1606313d4cebdae9b55b624f1efa0460a33724029542c" exitCode=2 Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.859014 4932 generic.go:334] "Generic (PLEG): container finished" podID="69d728ff-917d-4977-9da7-0c3a316cc805" containerID="d9529193b0a3c993177a1640ba61e348561be03f6ac16b92746a9bce5668a719" exitCode=0 Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.859036 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69d728ff-917d-4977-9da7-0c3a316cc805","Type":"ContainerDied","Data":"7d63b8d9ae9937e20f2a176371fc340c3e16c9f48c83102129f8fa1a093eeb77"} Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.859061 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69d728ff-917d-4977-9da7-0c3a316cc805","Type":"ContainerDied","Data":"74b60317958a6af423b1606313d4cebdae9b55b624f1efa0460a33724029542c"} Nov 25 10:37:52 crc kubenswrapper[4932]: I1125 10:37:52.859070 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69d728ff-917d-4977-9da7-0c3a316cc805","Type":"ContainerDied","Data":"d9529193b0a3c993177a1640ba61e348561be03f6ac16b92746a9bce5668a719"} Nov 25 10:37:53 crc kubenswrapper[4932]: I1125 10:37:53.926530 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ee193c35-f11b-4708-8d29-0905452cd59e","Type":"ContainerStarted","Data":"017e1e62f430a00b0f7fce27fd3cd98d432851b05d6137accebd730df4d51e10"} Nov 25 10:37:54 crc kubenswrapper[4932]: I1125 10:37:54.045860 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-4s296"] Nov 25 10:37:54 crc 
kubenswrapper[4932]: I1125 10:37:54.056609 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-4s296"] Nov 25 10:37:54 crc kubenswrapper[4932]: I1125 10:37:54.142518 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 25 10:37:54 crc kubenswrapper[4932]: I1125 10:37:54.621094 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="54b73df3-3af6-40b2-8116-47f32031ac99" path="/var/lib/kubelet/pods/54b73df3-3af6-40b2-8116-47f32031ac99/volumes" Nov 25 10:37:54 crc kubenswrapper[4932]: I1125 10:37:54.947374 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69d728ff-917d-4977-9da7-0c3a316cc805","Type":"ContainerDied","Data":"81d86de699eed7a1b634835af1689ff6f77a6ec41d70ec22f34cb75ed668d0cd"} Nov 25 10:37:54 crc kubenswrapper[4932]: I1125 10:37:54.947346 4932 generic.go:334] "Generic (PLEG): container finished" podID="69d728ff-917d-4977-9da7-0c3a316cc805" containerID="81d86de699eed7a1b634835af1689ff6f77a6ec41d70ec22f34cb75ed668d0cd" exitCode=0 Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.354221 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.529002 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46tph\" (UniqueName: \"kubernetes.io/projected/69d728ff-917d-4977-9da7-0c3a316cc805-kube-api-access-46tph\") pod \"69d728ff-917d-4977-9da7-0c3a316cc805\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.529169 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69d728ff-917d-4977-9da7-0c3a316cc805-log-httpd\") pod \"69d728ff-917d-4977-9da7-0c3a316cc805\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.529354 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69d728ff-917d-4977-9da7-0c3a316cc805-run-httpd\") pod \"69d728ff-917d-4977-9da7-0c3a316cc805\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.529410 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-scripts\") pod \"69d728ff-917d-4977-9da7-0c3a316cc805\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.529515 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-sg-core-conf-yaml\") pod \"69d728ff-917d-4977-9da7-0c3a316cc805\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.529582 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-config-data\") pod \"69d728ff-917d-4977-9da7-0c3a316cc805\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.529631 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-combined-ca-bundle\") pod \"69d728ff-917d-4977-9da7-0c3a316cc805\" (UID: \"69d728ff-917d-4977-9da7-0c3a316cc805\") " Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.530410 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69d728ff-917d-4977-9da7-0c3a316cc805-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "69d728ff-917d-4977-9da7-0c3a316cc805" (UID: "69d728ff-917d-4977-9da7-0c3a316cc805"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.530944 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69d728ff-917d-4977-9da7-0c3a316cc805-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "69d728ff-917d-4977-9da7-0c3a316cc805" (UID: "69d728ff-917d-4977-9da7-0c3a316cc805"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.536032 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-scripts" (OuterVolumeSpecName: "scripts") pod "69d728ff-917d-4977-9da7-0c3a316cc805" (UID: "69d728ff-917d-4977-9da7-0c3a316cc805"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.536109 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69d728ff-917d-4977-9da7-0c3a316cc805-kube-api-access-46tph" (OuterVolumeSpecName: "kube-api-access-46tph") pod "69d728ff-917d-4977-9da7-0c3a316cc805" (UID: "69d728ff-917d-4977-9da7-0c3a316cc805"). InnerVolumeSpecName "kube-api-access-46tph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.567036 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "69d728ff-917d-4977-9da7-0c3a316cc805" (UID: "69d728ff-917d-4977-9da7-0c3a316cc805"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.630254 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69d728ff-917d-4977-9da7-0c3a316cc805" (UID: "69d728ff-917d-4977-9da7-0c3a316cc805"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.632246 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.632276 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46tph\" (UniqueName: \"kubernetes.io/projected/69d728ff-917d-4977-9da7-0c3a316cc805-kube-api-access-46tph\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.632292 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69d728ff-917d-4977-9da7-0c3a316cc805-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.632305 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/69d728ff-917d-4977-9da7-0c3a316cc805-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.632316 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.632328 4932 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.666387 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-config-data" (OuterVolumeSpecName: "config-data") pod "69d728ff-917d-4977-9da7-0c3a316cc805" (UID: "69d728ff-917d-4977-9da7-0c3a316cc805"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.734671 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69d728ff-917d-4977-9da7-0c3a316cc805-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.960485 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"69d728ff-917d-4977-9da7-0c3a316cc805","Type":"ContainerDied","Data":"ea22416e33dde95b6602b82638b75e9d2ebba02b88b95f2418624be16745cea7"} Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.960570 4932 scope.go:117] "RemoveContainer" containerID="7d63b8d9ae9937e20f2a176371fc340c3e16c9f48c83102129f8fa1a093eeb77" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.960510 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.967964 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ee193c35-f11b-4708-8d29-0905452cd59e","Type":"ContainerStarted","Data":"eed3d8a9af3782509afdc4b29388b59da5720a47b6c918842b9cda899f4820c9"} Nov 25 10:37:55 crc kubenswrapper[4932]: I1125 10:37:55.990408 4932 scope.go:117] "RemoveContainer" containerID="74b60317958a6af423b1606313d4cebdae9b55b624f1efa0460a33724029542c" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.018418 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.032636 4932 scope.go:117] "RemoveContainer" containerID="81d86de699eed7a1b634835af1689ff6f77a6ec41d70ec22f34cb75ed668d0cd" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.032840 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.045043 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:37:56 crc kubenswrapper[4932]: E1125 10:37:56.045638 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="ceilometer-notification-agent" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.045668 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="ceilometer-notification-agent" Nov 25 10:37:56 crc kubenswrapper[4932]: E1125 10:37:56.045689 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="ceilometer-central-agent" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.045698 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="ceilometer-central-agent" Nov 25 10:37:56 crc kubenswrapper[4932]: E1125 10:37:56.045731 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="sg-core" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.045740 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="sg-core" Nov 25 10:37:56 crc kubenswrapper[4932]: E1125 10:37:56.045759 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="proxy-httpd" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.045768 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="proxy-httpd" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.046050 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="ceilometer-central-agent" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.046071 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="proxy-httpd" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.046087 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" containerName="ceilometer-notification-agent" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.046106 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" 
containerName="sg-core" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.062696 4932 scope.go:117] "RemoveContainer" containerID="d9529193b0a3c993177a1640ba61e348561be03f6ac16b92746a9bce5668a719" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.063776 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.068332 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.068492 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.069647 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.248422 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/896342f2-4b23-46d9-b845-edbe1251fcfb-log-httpd\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.248481 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kh8k4\" (UniqueName: \"kubernetes.io/projected/896342f2-4b23-46d9-b845-edbe1251fcfb-kube-api-access-kh8k4\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.248504 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.248521 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.248563 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-config-data\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.248590 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-scripts\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.248627 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/896342f2-4b23-46d9-b845-edbe1251fcfb-run-httpd\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.350561 4932 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/896342f2-4b23-46d9-b845-edbe1251fcfb-run-httpd\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.350783 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/896342f2-4b23-46d9-b845-edbe1251fcfb-log-httpd\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.350828 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kh8k4\" (UniqueName: \"kubernetes.io/projected/896342f2-4b23-46d9-b845-edbe1251fcfb-kube-api-access-kh8k4\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.350856 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.350884 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.350954 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-config-data\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.350994 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-scripts\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.351493 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/896342f2-4b23-46d9-b845-edbe1251fcfb-log-httpd\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.352067 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/896342f2-4b23-46d9-b845-edbe1251fcfb-run-httpd\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.359394 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-scripts\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.368295 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" 
(UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.369599 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-config-data\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.370393 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.383583 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kh8k4\" (UniqueName: \"kubernetes.io/projected/896342f2-4b23-46d9-b845-edbe1251fcfb-kube-api-access-kh8k4\") pod \"ceilometer-0\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") " pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.389341 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 10:37:56 crc kubenswrapper[4932]: I1125 10:37:56.619814 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69d728ff-917d-4977-9da7-0c3a316cc805" path="/var/lib/kubelet/pods/69d728ff-917d-4977-9da7-0c3a316cc805/volumes" Nov 25 10:37:57 crc kubenswrapper[4932]: I1125 10:37:57.060033 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:37:57 crc kubenswrapper[4932]: I1125 10:37:57.097743 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:37:57 crc kubenswrapper[4932]: I1125 10:37:57.097972 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="c4644bf8-6142-462d-8c94-c07283f431a9" containerName="kube-state-metrics" containerID="cri-o://4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab" gracePeriod=30 Nov 25 10:37:57 crc kubenswrapper[4932]: I1125 10:37:57.294045 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 10:37:57 crc kubenswrapper[4932]: W1125 10:37:57.297844 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod896342f2_4b23_46d9_b845_edbe1251fcfb.slice/crio-6d41d228eb13052e8b9ced887905dd9be25a797039866ec9878f528b6393bb2c WatchSource:0}: Error finding container 6d41d228eb13052e8b9ced887905dd9be25a797039866ec9878f528b6393bb2c: Status 404 returned error can't find the container with id 6d41d228eb13052e8b9ced887905dd9be25a797039866ec9878f528b6393bb2c Nov 25 10:37:57 crc kubenswrapper[4932]: I1125 10:37:57.602499 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 10:37:57 crc kubenswrapper[4932]: I1125 10:37:57.786349 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hf9p\" (UniqueName: \"kubernetes.io/projected/c4644bf8-6142-462d-8c94-c07283f431a9-kube-api-access-7hf9p\") pod \"c4644bf8-6142-462d-8c94-c07283f431a9\" (UID: \"c4644bf8-6142-462d-8c94-c07283f431a9\") " Nov 25 10:37:57 crc kubenswrapper[4932]: I1125 10:37:57.790884 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4644bf8-6142-462d-8c94-c07283f431a9-kube-api-access-7hf9p" (OuterVolumeSpecName: "kube-api-access-7hf9p") pod "c4644bf8-6142-462d-8c94-c07283f431a9" (UID: "c4644bf8-6142-462d-8c94-c07283f431a9"). InnerVolumeSpecName "kube-api-access-7hf9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:37:57 crc kubenswrapper[4932]: I1125 10:37:57.890266 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hf9p\" (UniqueName: \"kubernetes.io/projected/c4644bf8-6142-462d-8c94-c07283f431a9-kube-api-access-7hf9p\") on node \"crc\" DevicePath \"\"" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.007450 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ee193c35-f11b-4708-8d29-0905452cd59e","Type":"ContainerStarted","Data":"ca619bdb9250efe22c9831916715c2ff7bb9bcc0ac6097116203f7638aebae15"} Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.007714 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-api" containerID="cri-o://e094faf54dfa874834aad777d22318cd6bdeee470c2983839e02979549e750cd" gracePeriod=30 Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.007869 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-notifier" containerID="cri-o://eed3d8a9af3782509afdc4b29388b59da5720a47b6c918842b9cda899f4820c9" gracePeriod=30 Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.007932 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-evaluator" containerID="cri-o://017e1e62f430a00b0f7fce27fd3cd98d432851b05d6137accebd730df4d51e10" gracePeriod=30 Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.008915 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-listener" containerID="cri-o://ca619bdb9250efe22c9831916715c2ff7bb9bcc0ac6097116203f7638aebae15" gracePeriod=30 Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.010637 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"896342f2-4b23-46d9-b845-edbe1251fcfb","Type":"ContainerStarted","Data":"6d41d228eb13052e8b9ced887905dd9be25a797039866ec9878f528b6393bb2c"} Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.018316 4932 generic.go:334] "Generic (PLEG): container finished" podID="c4644bf8-6142-462d-8c94-c07283f431a9" containerID="4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab" exitCode=2 Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.018456 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"c4644bf8-6142-462d-8c94-c07283f431a9","Type":"ContainerDied","Data":"4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab"} Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.018509 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"c4644bf8-6142-462d-8c94-c07283f431a9","Type":"ContainerDied","Data":"16f2d20086f5431a9901746b2bb19d6d0a165dddca7ec8d54e4a515855917e50"} Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.018538 4932 scope.go:117] "RemoveContainer" containerID="4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.018759 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.042296 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.416488534 podStartE2EDuration="8.042251647s" podCreationTimestamp="2025-11-25 10:37:50 +0000 UTC" firstStartedPulling="2025-11-25 10:37:51.13290915 +0000 UTC m=+6531.258938713" lastFinishedPulling="2025-11-25 10:37:56.758672263 +0000 UTC m=+6536.884701826" observedRunningTime="2025-11-25 10:37:58.032988792 +0000 UTC m=+6538.159018345" watchObservedRunningTime="2025-11-25 10:37:58.042251647 +0000 UTC m=+6538.168281220" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.065444 4932 scope.go:117] "RemoveContainer" containerID="4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab" Nov 25 10:37:58 crc kubenswrapper[4932]: E1125 10:37:58.068676 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab\": container with ID starting with 4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab not found: ID does not exist" containerID="4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.068730 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab"} err="failed to get container status \"4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab\": rpc error: code = NotFound desc = could not find container \"4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab\": container with ID starting with 4a1bdd10b34d5ae5dda8bdbaf0659a870e9cc784a7b52005620bce9c8a3dacab not found: ID does not exist" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.077942 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.092253 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.120763 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:37:58 crc kubenswrapper[4932]: E1125 10:37:58.122384 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4644bf8-6142-462d-8c94-c07283f431a9" containerName="kube-state-metrics" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.122418 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4644bf8-6142-462d-8c94-c07283f431a9" containerName="kube-state-metrics" Nov 25 10:37:58 crc 
kubenswrapper[4932]: I1125 10:37:58.122898 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4644bf8-6142-462d-8c94-c07283f431a9" containerName="kube-state-metrics" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.124562 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.127384 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.127817 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.131031 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.304064 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92b56b05-8133-4f01-a855-7bb7f523b38c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"92b56b05-8133-4f01-a855-7bb7f523b38c\") " pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.304150 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/92b56b05-8133-4f01-a855-7bb7f523b38c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"92b56b05-8133-4f01-a855-7bb7f523b38c\") " pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.304344 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/92b56b05-8133-4f01-a855-7bb7f523b38c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"92b56b05-8133-4f01-a855-7bb7f523b38c\") " pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.304389 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfjt9\" (UniqueName: \"kubernetes.io/projected/92b56b05-8133-4f01-a855-7bb7f523b38c-kube-api-access-sfjt9\") pod \"kube-state-metrics-0\" (UID: \"92b56b05-8133-4f01-a855-7bb7f523b38c\") " pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.406574 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92b56b05-8133-4f01-a855-7bb7f523b38c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"92b56b05-8133-4f01-a855-7bb7f523b38c\") " pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.406636 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/92b56b05-8133-4f01-a855-7bb7f523b38c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"92b56b05-8133-4f01-a855-7bb7f523b38c\") " pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.406753 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: 
\"kubernetes.io/secret/92b56b05-8133-4f01-a855-7bb7f523b38c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"92b56b05-8133-4f01-a855-7bb7f523b38c\") " pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.406791 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfjt9\" (UniqueName: \"kubernetes.io/projected/92b56b05-8133-4f01-a855-7bb7f523b38c-kube-api-access-sfjt9\") pod \"kube-state-metrics-0\" (UID: \"92b56b05-8133-4f01-a855-7bb7f523b38c\") " pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.412977 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92b56b05-8133-4f01-a855-7bb7f523b38c-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"92b56b05-8133-4f01-a855-7bb7f523b38c\") " pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.412977 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/92b56b05-8133-4f01-a855-7bb7f523b38c-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"92b56b05-8133-4f01-a855-7bb7f523b38c\") " pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.421703 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/92b56b05-8133-4f01-a855-7bb7f523b38c-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"92b56b05-8133-4f01-a855-7bb7f523b38c\") " pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.432332 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfjt9\" (UniqueName: \"kubernetes.io/projected/92b56b05-8133-4f01-a855-7bb7f523b38c-kube-api-access-sfjt9\") pod \"kube-state-metrics-0\" (UID: \"92b56b05-8133-4f01-a855-7bb7f523b38c\") " pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.459217 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.624920 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c4644bf8-6142-462d-8c94-c07283f431a9" path="/var/lib/kubelet/pods/c4644bf8-6142-462d-8c94-c07283f431a9/volumes" Nov 25 10:37:58 crc kubenswrapper[4932]: I1125 10:37:58.943197 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 10:37:58 crc kubenswrapper[4932]: W1125 10:37:58.947313 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod92b56b05_8133_4f01_a855_7bb7f523b38c.slice/crio-41f797d6af152196614e384f3524354824416622adf8a2087f9cf147c687e10a WatchSource:0}: Error finding container 41f797d6af152196614e384f3524354824416622adf8a2087f9cf147c687e10a: Status 404 returned error can't find the container with id 41f797d6af152196614e384f3524354824416622adf8a2087f9cf147c687e10a Nov 25 10:37:59 crc kubenswrapper[4932]: I1125 10:37:59.031124 4932 generic.go:334] "Generic (PLEG): container finished" podID="ee193c35-f11b-4708-8d29-0905452cd59e" containerID="017e1e62f430a00b0f7fce27fd3cd98d432851b05d6137accebd730df4d51e10" exitCode=0 Nov 25 10:37:59 crc kubenswrapper[4932]: I1125 10:37:59.031178 4932 generic.go:334] "Generic (PLEG): container finished" podID="ee193c35-f11b-4708-8d29-0905452cd59e" containerID="e094faf54dfa874834aad777d22318cd6bdeee470c2983839e02979549e750cd" exitCode=0 Nov 25 10:37:59 crc kubenswrapper[4932]: I1125 10:37:59.031219 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ee193c35-f11b-4708-8d29-0905452cd59e","Type":"ContainerDied","Data":"017e1e62f430a00b0f7fce27fd3cd98d432851b05d6137accebd730df4d51e10"} Nov 25 10:37:59 crc kubenswrapper[4932]: I1125 10:37:59.031266 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ee193c35-f11b-4708-8d29-0905452cd59e","Type":"ContainerDied","Data":"e094faf54dfa874834aad777d22318cd6bdeee470c2983839e02979549e750cd"} Nov 25 10:37:59 crc kubenswrapper[4932]: I1125 10:37:59.033644 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"896342f2-4b23-46d9-b845-edbe1251fcfb","Type":"ContainerStarted","Data":"36df9a0fe8ea0a8c15d5a0ffa11b812939daf35c4a71678f18e1c2dce0800c4a"} Nov 25 10:37:59 crc kubenswrapper[4932]: I1125 10:37:59.033791 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"896342f2-4b23-46d9-b845-edbe1251fcfb","Type":"ContainerStarted","Data":"bc60b36756c1d0d882150a7f746b0ac81f38fb82bbd7450f9c144b3509cfe1ff"} Nov 25 10:37:59 crc kubenswrapper[4932]: I1125 10:37:59.038086 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"92b56b05-8133-4f01-a855-7bb7f523b38c","Type":"ContainerStarted","Data":"41f797d6af152196614e384f3524354824416622adf8a2087f9cf147c687e10a"} Nov 25 10:38:00 crc kubenswrapper[4932]: I1125 10:38:00.052080 4932 generic.go:334] "Generic (PLEG): container finished" podID="ee193c35-f11b-4708-8d29-0905452cd59e" containerID="eed3d8a9af3782509afdc4b29388b59da5720a47b6c918842b9cda899f4820c9" exitCode=0 Nov 25 10:38:00 crc kubenswrapper[4932]: I1125 10:38:00.052125 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ee193c35-f11b-4708-8d29-0905452cd59e","Type":"ContainerDied","Data":"eed3d8a9af3782509afdc4b29388b59da5720a47b6c918842b9cda899f4820c9"} 
Nov 25 10:38:00 crc kubenswrapper[4932]: I1125 10:38:00.055533 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"896342f2-4b23-46d9-b845-edbe1251fcfb","Type":"ContainerStarted","Data":"34ac9fd38f4e83bddf5b0ddf6a79e8ae98491d7cba7643fdd946fec3403b0387"}
Nov 25 10:38:00 crc kubenswrapper[4932]: I1125 10:38:00.057230 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"92b56b05-8133-4f01-a855-7bb7f523b38c","Type":"ContainerStarted","Data":"8598d59fa4e850d8d77e7040939460516180bf95e4182af24e0dce8ca88dac92"}
Nov 25 10:38:00 crc kubenswrapper[4932]: I1125 10:38:00.057386 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 25 10:38:00 crc kubenswrapper[4932]: I1125 10:38:00.082046 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.725958211 podStartE2EDuration="2.082024589s" podCreationTimestamp="2025-11-25 10:37:58 +0000 UTC" firstStartedPulling="2025-11-25 10:37:58.949904747 +0000 UTC m=+6539.075934310" lastFinishedPulling="2025-11-25 10:37:59.305971125 +0000 UTC m=+6539.432000688" observedRunningTime="2025-11-25 10:38:00.074769101 +0000 UTC m=+6540.200798664" watchObservedRunningTime="2025-11-25 10:38:00.082024589 +0000 UTC m=+6540.208054152"
Nov 25 10:38:02 crc kubenswrapper[4932]: I1125 10:38:02.079982 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"896342f2-4b23-46d9-b845-edbe1251fcfb","Type":"ContainerStarted","Data":"41b19be7998da6756ea9e9f9c6ede17cc0c5e0d3556700b3d313523150bfd1fb"}
Nov 25 10:38:02 crc kubenswrapper[4932]: I1125 10:38:02.080637 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 10:38:02 crc kubenswrapper[4932]: I1125 10:38:02.080413 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="proxy-httpd" containerID="cri-o://41b19be7998da6756ea9e9f9c6ede17cc0c5e0d3556700b3d313523150bfd1fb" gracePeriod=30
Nov 25 10:38:02 crc kubenswrapper[4932]: I1125 10:38:02.080431 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="sg-core" containerID="cri-o://34ac9fd38f4e83bddf5b0ddf6a79e8ae98491d7cba7643fdd946fec3403b0387" gracePeriod=30
Nov 25 10:38:02 crc kubenswrapper[4932]: I1125 10:38:02.080445 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="ceilometer-notification-agent" containerID="cri-o://36df9a0fe8ea0a8c15d5a0ffa11b812939daf35c4a71678f18e1c2dce0800c4a" gracePeriod=30
Nov 25 10:38:02 crc kubenswrapper[4932]: I1125 10:38:02.080146 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="ceilometer-central-agent" containerID="cri-o://bc60b36756c1d0d882150a7f746b0ac81f38fb82bbd7450f9c144b3509cfe1ff" gracePeriod=30
Nov 25 10:38:02 crc kubenswrapper[4932]: I1125 10:38:02.114928 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.296470212 podStartE2EDuration="6.114905633s" podCreationTimestamp="2025-11-25 10:37:56 +0000 UTC" firstStartedPulling="2025-11-25 10:37:57.3003256 +0000 UTC m=+6537.426355163" lastFinishedPulling="2025-11-25 10:38:01.118761021 +0000 UTC m=+6541.244790584" observedRunningTime="2025-11-25 10:38:02.113949985 +0000 UTC m=+6542.239979548" watchObservedRunningTime="2025-11-25 10:38:02.114905633 +0000 UTC m=+6542.240935196"
Nov 25 10:38:03 crc kubenswrapper[4932]: I1125 10:38:03.090524 4932 generic.go:334] "Generic (PLEG): container finished" podID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerID="41b19be7998da6756ea9e9f9c6ede17cc0c5e0d3556700b3d313523150bfd1fb" exitCode=0
Nov 25 10:38:03 crc kubenswrapper[4932]: I1125 10:38:03.090814 4932 generic.go:334] "Generic (PLEG): container finished" podID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerID="34ac9fd38f4e83bddf5b0ddf6a79e8ae98491d7cba7643fdd946fec3403b0387" exitCode=2
Nov 25 10:38:03 crc kubenswrapper[4932]: I1125 10:38:03.090825 4932 generic.go:334] "Generic (PLEG): container finished" podID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerID="36df9a0fe8ea0a8c15d5a0ffa11b812939daf35c4a71678f18e1c2dce0800c4a" exitCode=0
Nov 25 10:38:03 crc kubenswrapper[4932]: I1125 10:38:03.090596 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"896342f2-4b23-46d9-b845-edbe1251fcfb","Type":"ContainerDied","Data":"41b19be7998da6756ea9e9f9c6ede17cc0c5e0d3556700b3d313523150bfd1fb"}
Nov 25 10:38:03 crc kubenswrapper[4932]: I1125 10:38:03.090860 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"896342f2-4b23-46d9-b845-edbe1251fcfb","Type":"ContainerDied","Data":"34ac9fd38f4e83bddf5b0ddf6a79e8ae98491d7cba7643fdd946fec3403b0387"}
Nov 25 10:38:03 crc kubenswrapper[4932]: I1125 10:38:03.090874 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"896342f2-4b23-46d9-b845-edbe1251fcfb","Type":"ContainerDied","Data":"36df9a0fe8ea0a8c15d5a0ffa11b812939daf35c4a71678f18e1c2dce0800c4a"}
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.123571 4932 generic.go:334] "Generic (PLEG): container finished" podID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerID="bc60b36756c1d0d882150a7f746b0ac81f38fb82bbd7450f9c144b3509cfe1ff" exitCode=0
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.123629 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"896342f2-4b23-46d9-b845-edbe1251fcfb","Type":"ContainerDied","Data":"bc60b36756c1d0d882150a7f746b0ac81f38fb82bbd7450f9c144b3509cfe1ff"}
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.403077 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.567581 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/896342f2-4b23-46d9-b845-edbe1251fcfb-log-httpd\") pod \"896342f2-4b23-46d9-b845-edbe1251fcfb\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") "
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.568390 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-combined-ca-bundle\") pod \"896342f2-4b23-46d9-b845-edbe1251fcfb\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") "
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.568477 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/896342f2-4b23-46d9-b845-edbe1251fcfb-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "896342f2-4b23-46d9-b845-edbe1251fcfb" (UID: "896342f2-4b23-46d9-b845-edbe1251fcfb"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.568505 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kh8k4\" (UniqueName: \"kubernetes.io/projected/896342f2-4b23-46d9-b845-edbe1251fcfb-kube-api-access-kh8k4\") pod \"896342f2-4b23-46d9-b845-edbe1251fcfb\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") "
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.568557 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-config-data\") pod \"896342f2-4b23-46d9-b845-edbe1251fcfb\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") "
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.568715 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-sg-core-conf-yaml\") pod \"896342f2-4b23-46d9-b845-edbe1251fcfb\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") "
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.568791 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/896342f2-4b23-46d9-b845-edbe1251fcfb-run-httpd\") pod \"896342f2-4b23-46d9-b845-edbe1251fcfb\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") "
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.568836 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-scripts\") pod \"896342f2-4b23-46d9-b845-edbe1251fcfb\" (UID: \"896342f2-4b23-46d9-b845-edbe1251fcfb\") "
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.569561 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/896342f2-4b23-46d9-b845-edbe1251fcfb-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "896342f2-4b23-46d9-b845-edbe1251fcfb" (UID: "896342f2-4b23-46d9-b845-edbe1251fcfb"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.570128 4932 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/896342f2-4b23-46d9-b845-edbe1251fcfb-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.570149 4932 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/896342f2-4b23-46d9-b845-edbe1251fcfb-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.574070 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-scripts" (OuterVolumeSpecName: "scripts") pod "896342f2-4b23-46d9-b845-edbe1251fcfb" (UID: "896342f2-4b23-46d9-b845-edbe1251fcfb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.576467 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/896342f2-4b23-46d9-b845-edbe1251fcfb-kube-api-access-kh8k4" (OuterVolumeSpecName: "kube-api-access-kh8k4") pod "896342f2-4b23-46d9-b845-edbe1251fcfb" (UID: "896342f2-4b23-46d9-b845-edbe1251fcfb"). InnerVolumeSpecName "kube-api-access-kh8k4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.600807 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "896342f2-4b23-46d9-b845-edbe1251fcfb" (UID: "896342f2-4b23-46d9-b845-edbe1251fcfb"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.646844 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "896342f2-4b23-46d9-b845-edbe1251fcfb" (UID: "896342f2-4b23-46d9-b845-edbe1251fcfb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.672229 4932 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.672264 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.672276 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.672289 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kh8k4\" (UniqueName: \"kubernetes.io/projected/896342f2-4b23-46d9-b845-edbe1251fcfb-kube-api-access-kh8k4\") on node \"crc\" DevicePath \"\""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.674706 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-config-data" (OuterVolumeSpecName: "config-data") pod "896342f2-4b23-46d9-b845-edbe1251fcfb" (UID: "896342f2-4b23-46d9-b845-edbe1251fcfb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:38:05 crc kubenswrapper[4932]: I1125 10:38:05.774922 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/896342f2-4b23-46d9-b845-edbe1251fcfb-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.136581 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"896342f2-4b23-46d9-b845-edbe1251fcfb","Type":"ContainerDied","Data":"6d41d228eb13052e8b9ced887905dd9be25a797039866ec9878f528b6393bb2c"}
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.136961 4932 scope.go:117] "RemoveContainer" containerID="41b19be7998da6756ea9e9f9c6ede17cc0c5e0d3556700b3d313523150bfd1fb"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.136642 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.177278 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.197307 4932 scope.go:117] "RemoveContainer" containerID="34ac9fd38f4e83bddf5b0ddf6a79e8ae98491d7cba7643fdd946fec3403b0387"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.197523 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.202499 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 10:38:06 crc kubenswrapper[4932]: E1125 10:38:06.202935 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="sg-core"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.202953 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="sg-core"
Nov 25 10:38:06 crc kubenswrapper[4932]: E1125 10:38:06.202975 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="proxy-httpd"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.202981 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="proxy-httpd"
Nov 25 10:38:06 crc kubenswrapper[4932]: E1125 10:38:06.203016 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="ceilometer-notification-agent"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.203025 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="ceilometer-notification-agent"
Nov 25 10:38:06 crc kubenswrapper[4932]: E1125 10:38:06.203039 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="ceilometer-central-agent"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.203045 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="ceilometer-central-agent"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.203244 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="ceilometer-notification-agent"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.203258 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="proxy-httpd"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.203278 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="ceilometer-central-agent"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.203290 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" containerName="sg-core"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.205292 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.211283 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.211519 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.211524 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.219700 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.246563 4932 scope.go:117] "RemoveContainer" containerID="36df9a0fe8ea0a8c15d5a0ffa11b812939daf35c4a71678f18e1c2dce0800c4a"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.268074 4932 scope.go:117] "RemoveContainer" containerID="bc60b36756c1d0d882150a7f746b0ac81f38fb82bbd7450f9c144b3509cfe1ff"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.388840 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.388911 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f15ae2e4-527c-4d5a-9edb-acc2af44147e-run-httpd\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.389068 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.389107 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-scripts\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.389273 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-config-data\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.389318 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f15ae2e4-527c-4d5a-9edb-acc2af44147e-log-httpd\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.389437 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.389527 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds88x\" (UniqueName: \"kubernetes.io/projected/f15ae2e4-527c-4d5a-9edb-acc2af44147e-kube-api-access-ds88x\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.492242 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.492314 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-scripts\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.492413 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-config-data\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.492478 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f15ae2e4-527c-4d5a-9edb-acc2af44147e-log-httpd\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.492530 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.492575 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds88x\" (UniqueName: \"kubernetes.io/projected/f15ae2e4-527c-4d5a-9edb-acc2af44147e-kube-api-access-ds88x\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.492618 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.493005 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f15ae2e4-527c-4d5a-9edb-acc2af44147e-run-httpd\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.493262 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f15ae2e4-527c-4d5a-9edb-acc2af44147e-log-httpd\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.493365 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f15ae2e4-527c-4d5a-9edb-acc2af44147e-run-httpd\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.497354 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.497797 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.499068 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-config-data\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.500353 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.501757 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f15ae2e4-527c-4d5a-9edb-acc2af44147e-scripts\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.510612 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds88x\" (UniqueName: \"kubernetes.io/projected/f15ae2e4-527c-4d5a-9edb-acc2af44147e-kube-api-access-ds88x\") pod \"ceilometer-0\" (UID: \"f15ae2e4-527c-4d5a-9edb-acc2af44147e\") " pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.538435 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.621995 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="896342f2-4b23-46d9-b845-edbe1251fcfb" path="/var/lib/kubelet/pods/896342f2-4b23-46d9-b845-edbe1251fcfb/volumes"
Nov 25 10:38:06 crc kubenswrapper[4932]: I1125 10:38:06.982464 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 10:38:07 crc kubenswrapper[4932]: I1125 10:38:07.147831 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f15ae2e4-527c-4d5a-9edb-acc2af44147e","Type":"ContainerStarted","Data":"060165a3a6d9e0ad43026d5d50e39c8027d7b692a3cd1c583f3e0070c80ffb0f"}
Nov 25 10:38:07 crc kubenswrapper[4932]: I1125 10:38:07.606658 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"
Nov 25 10:38:07 crc kubenswrapper[4932]: E1125 10:38:07.606936 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:38:08 crc kubenswrapper[4932]: I1125 10:38:08.160058 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f15ae2e4-527c-4d5a-9edb-acc2af44147e","Type":"ContainerStarted","Data":"a4d91895b80fd151dcd3989063ad0f24f1543c953eaa77494109a6b3f91a6e5a"}
Nov 25 10:38:08 crc kubenswrapper[4932]: I1125 10:38:08.472902 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 25 10:38:09 crc kubenswrapper[4932]: I1125 10:38:09.174515 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f15ae2e4-527c-4d5a-9edb-acc2af44147e","Type":"ContainerStarted","Data":"a3d75560f8128a77cc7b31aae55dd12bb986407198b31c8bc0263ef6a132aefd"}
Nov 25 10:38:10 crc kubenswrapper[4932]: I1125 10:38:10.187206 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f15ae2e4-527c-4d5a-9edb-acc2af44147e","Type":"ContainerStarted","Data":"6808ec207c5df07cbdd0b513a4cfabea8b72095bb35c8d3888800aad94d8fa5e"}
Nov 25 10:38:11 crc kubenswrapper[4932]: I1125 10:38:11.201131 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f15ae2e4-527c-4d5a-9edb-acc2af44147e","Type":"ContainerStarted","Data":"91deb9e30866c7427a6a6880559b316516ab526ba519b8a03e1cdf8866df6a3e"}
Nov 25 10:38:11 crc kubenswrapper[4932]: I1125 10:38:11.201519 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 10:38:11 crc kubenswrapper[4932]: I1125 10:38:11.235401 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.581702447 podStartE2EDuration="5.235378265s" podCreationTimestamp="2025-11-25 10:38:06 +0000 UTC" firstStartedPulling="2025-11-25 10:38:06.991450149 +0000 UTC m=+6547.117479712" lastFinishedPulling="2025-11-25 10:38:10.645125967 +0000 UTC m=+6550.771155530" observedRunningTime="2025-11-25 10:38:11.225275146 +0000 UTC m=+6551.351304709" watchObservedRunningTime="2025-11-25 10:38:11.235378265 +0000 UTC
m=+6551.361407828" Nov 25 10:38:13 crc kubenswrapper[4932]: I1125 10:38:13.030547 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wklbb"] Nov 25 10:38:13 crc kubenswrapper[4932]: I1125 10:38:13.039259 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wklbb"] Nov 25 10:38:14 crc kubenswrapper[4932]: I1125 10:38:14.032388 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-fjmbp"] Nov 25 10:38:14 crc kubenswrapper[4932]: I1125 10:38:14.042132 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-fjmbp"] Nov 25 10:38:14 crc kubenswrapper[4932]: I1125 10:38:14.618749 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67c688f2-d79f-41e2-82d5-88b15fd52efd" path="/var/lib/kubelet/pods/67c688f2-d79f-41e2-82d5-88b15fd52efd/volumes" Nov 25 10:38:14 crc kubenswrapper[4932]: I1125 10:38:14.620007 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb104cd1-6cb3-441f-a90b-abd9bbd76cd5" path="/var/lib/kubelet/pods/bb104cd1-6cb3-441f-a90b-abd9bbd76cd5/volumes" Nov 25 10:38:16 crc kubenswrapper[4932]: I1125 10:38:16.920573 4932 scope.go:117] "RemoveContainer" containerID="beec379a0c6b93f3ab1b3875a8bcb93dd45b3bebd60aed89eda8d346216e081f" Nov 25 10:38:16 crc kubenswrapper[4932]: I1125 10:38:16.948646 4932 scope.go:117] "RemoveContainer" containerID="81e34d344ce88b4eb1ff0fd3bb2c1d3b97b252c19f190b83c20a61e09177d506" Nov 25 10:38:17 crc kubenswrapper[4932]: I1125 10:38:17.020619 4932 scope.go:117] "RemoveContainer" containerID="68aca14ebe649e4d42b3757d267d9f35de851988f5416df22e6347f907827e7d" Nov 25 10:38:17 crc kubenswrapper[4932]: I1125 10:38:17.071928 4932 scope.go:117] "RemoveContainer" containerID="4b01ec669e9a950b868d84be0d32b86e3cab6ccbee292c9aa62528c74878ffd5" Nov 25 10:38:17 crc kubenswrapper[4932]: I1125 10:38:17.128497 4932 scope.go:117] "RemoveContainer" containerID="1d6c62ab15a50b591f5c2b83752679505ad726bb12ee0a9c1db2bdd72d2e0b0a" Nov 25 10:38:17 crc kubenswrapper[4932]: I1125 10:38:17.215444 4932 scope.go:117] "RemoveContainer" containerID="359d94849b99a181539f9853ec8b2dc8d84091d1ac4616d3ec7a5c0849c2289d" Nov 25 10:38:17 crc kubenswrapper[4932]: I1125 10:38:17.257794 4932 scope.go:117] "RemoveContainer" containerID="47391dcf8393f2c23b2a4b3459c14008e6d3283d2a862c5dedb8cf5da3d7afb1" Nov 25 10:38:17 crc kubenswrapper[4932]: I1125 10:38:17.296610 4932 scope.go:117] "RemoveContainer" containerID="b4cc984fe03c119ea482429bc2da34118c0cd93ee0c8a55b628d2f018656c444" Nov 25 10:38:17 crc kubenswrapper[4932]: I1125 10:38:17.315302 4932 scope.go:117] "RemoveContainer" containerID="d56bd976dc36285a7b61db82648c2c784c3778d8af93391c304870b55d07e4cc" Nov 25 10:38:19 crc kubenswrapper[4932]: I1125 10:38:19.605636 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:38:19 crc kubenswrapper[4932]: E1125 10:38:19.607406 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:38:28 crc kubenswrapper[4932]: W1125 10:38:28.055239 4932 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod896342f2_4b23_46d9_b845_edbe1251fcfb.slice/crio-41b19be7998da6756ea9e9f9c6ede17cc0c5e0d3556700b3d313523150bfd1fb.scope WatchSource:0}: Error finding container 41b19be7998da6756ea9e9f9c6ede17cc0c5e0d3556700b3d313523150bfd1fb: Status 404 returned error can't find the container with id 41b19be7998da6756ea9e9f9c6ede17cc0c5e0d3556700b3d313523150bfd1fb Nov 25 10:38:28 crc kubenswrapper[4932]: E1125 10:38:28.056484 4932 manager.go:1116] Failed to create existing container: /kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod896342f2_4b23_46d9_b845_edbe1251fcfb.slice/crio-6d41d228eb13052e8b9ced887905dd9be25a797039866ec9878f528b6393bb2c: Error finding container 6d41d228eb13052e8b9ced887905dd9be25a797039866ec9878f528b6393bb2c: Status 404 returned error can't find the container with id 6d41d228eb13052e8b9ced887905dd9be25a797039866ec9878f528b6393bb2c Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.368254 4932 generic.go:334] "Generic (PLEG): container finished" podID="ee193c35-f11b-4708-8d29-0905452cd59e" containerID="ca619bdb9250efe22c9831916715c2ff7bb9bcc0ac6097116203f7638aebae15" exitCode=137 Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.368301 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ee193c35-f11b-4708-8d29-0905452cd59e","Type":"ContainerDied","Data":"ca619bdb9250efe22c9831916715c2ff7bb9bcc0ac6097116203f7638aebae15"} Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.466284 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.473596 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-scripts\") pod \"ee193c35-f11b-4708-8d29-0905452cd59e\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.473673 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-config-data\") pod \"ee193c35-f11b-4708-8d29-0905452cd59e\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.473709 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzgw8\" (UniqueName: \"kubernetes.io/projected/ee193c35-f11b-4708-8d29-0905452cd59e-kube-api-access-kzgw8\") pod \"ee193c35-f11b-4708-8d29-0905452cd59e\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.473762 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-combined-ca-bundle\") pod \"ee193c35-f11b-4708-8d29-0905452cd59e\" (UID: \"ee193c35-f11b-4708-8d29-0905452cd59e\") " Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.489798 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-scripts" (OuterVolumeSpecName: "scripts") pod "ee193c35-f11b-4708-8d29-0905452cd59e" (UID: "ee193c35-f11b-4708-8d29-0905452cd59e"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.489956 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee193c35-f11b-4708-8d29-0905452cd59e-kube-api-access-kzgw8" (OuterVolumeSpecName: "kube-api-access-kzgw8") pod "ee193c35-f11b-4708-8d29-0905452cd59e" (UID: "ee193c35-f11b-4708-8d29-0905452cd59e"). InnerVolumeSpecName "kube-api-access-kzgw8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.576582 4932 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.576623 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzgw8\" (UniqueName: \"kubernetes.io/projected/ee193c35-f11b-4708-8d29-0905452cd59e-kube-api-access-kzgw8\") on node \"crc\" DevicePath \"\"" Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.606344 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-config-data" (OuterVolumeSpecName: "config-data") pod "ee193c35-f11b-4708-8d29-0905452cd59e" (UID: "ee193c35-f11b-4708-8d29-0905452cd59e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.618996 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ee193c35-f11b-4708-8d29-0905452cd59e" (UID: "ee193c35-f11b-4708-8d29-0905452cd59e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.679352 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:38:28 crc kubenswrapper[4932]: I1125 10:38:28.679395 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee193c35-f11b-4708-8d29-0905452cd59e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.379435 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"ee193c35-f11b-4708-8d29-0905452cd59e","Type":"ContainerDied","Data":"7e85f7138473b8835317fa52bd28d29c0abdf2968fa5c578c6deadca4a2164a2"} Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.379797 4932 scope.go:117] "RemoveContainer" containerID="ca619bdb9250efe22c9831916715c2ff7bb9bcc0ac6097116203f7638aebae15" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.379546 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.411119 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.415628 4932 scope.go:117] "RemoveContainer" containerID="eed3d8a9af3782509afdc4b29388b59da5720a47b6c918842b9cda899f4820c9" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.424630 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.447222 4932 scope.go:117] "RemoveContainer" containerID="017e1e62f430a00b0f7fce27fd3cd98d432851b05d6137accebd730df4d51e10" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.448787 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 25 10:38:29 crc kubenswrapper[4932]: E1125 10:38:29.449244 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-evaluator" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.449260 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-evaluator" Nov 25 10:38:29 crc kubenswrapper[4932]: E1125 10:38:29.449275 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-listener" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.449282 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-listener" Nov 25 10:38:29 crc kubenswrapper[4932]: E1125 10:38:29.449313 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-api" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.449320 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-api" Nov 25 10:38:29 crc kubenswrapper[4932]: E1125 10:38:29.449336 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-notifier" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.449342 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-notifier" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.449528 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-notifier" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.449544 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-listener" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.449559 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-api" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.449578 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" containerName="aodh-evaluator" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.452174 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.456510 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.456581 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.456791 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.456815 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-d5glh" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.458109 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.466932 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.469397 4932 scope.go:117] "RemoveContainer" containerID="e094faf54dfa874834aad777d22318cd6bdeee470c2983839e02979549e750cd" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.492961 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bb2l\" (UniqueName: \"kubernetes.io/projected/937396d1-1fdc-4cb2-8897-d7b8b6844208-kube-api-access-5bb2l\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.493051 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-scripts\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.493166 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-combined-ca-bundle\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.493217 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-public-tls-certs\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.493249 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-internal-tls-certs\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.493287 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-config-data\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.595863 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bb2l\" 
(UniqueName: \"kubernetes.io/projected/937396d1-1fdc-4cb2-8897-d7b8b6844208-kube-api-access-5bb2l\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.595958 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-scripts\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.596048 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-combined-ca-bundle\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.596071 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-public-tls-certs\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.596103 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-internal-tls-certs\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.596133 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-config-data\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.623928 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-public-tls-certs\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.623940 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-internal-tls-certs\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.624213 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-scripts\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.624676 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-combined-ca-bundle\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.625100 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/937396d1-1fdc-4cb2-8897-d7b8b6844208-config-data\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " 
pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.658808 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bb2l\" (UniqueName: \"kubernetes.io/projected/937396d1-1fdc-4cb2-8897-d7b8b6844208-kube-api-access-5bb2l\") pod \"aodh-0\" (UID: \"937396d1-1fdc-4cb2-8897-d7b8b6844208\") " pod="openstack/aodh-0" Nov 25 10:38:29 crc kubenswrapper[4932]: I1125 10:38:29.771946 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 25 10:38:30 crc kubenswrapper[4932]: I1125 10:38:30.359769 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 25 10:38:30 crc kubenswrapper[4932]: W1125 10:38:30.362880 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod937396d1_1fdc_4cb2_8897_d7b8b6844208.slice/crio-7fbb2e861a2dafff4fd55659a3977e2adebf77b0effb5fe880a1a5a4e418845d WatchSource:0}: Error finding container 7fbb2e861a2dafff4fd55659a3977e2adebf77b0effb5fe880a1a5a4e418845d: Status 404 returned error can't find the container with id 7fbb2e861a2dafff4fd55659a3977e2adebf77b0effb5fe880a1a5a4e418845d Nov 25 10:38:30 crc kubenswrapper[4932]: I1125 10:38:30.392528 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"937396d1-1fdc-4cb2-8897-d7b8b6844208","Type":"ContainerStarted","Data":"7fbb2e861a2dafff4fd55659a3977e2adebf77b0effb5fe880a1a5a4e418845d"} Nov 25 10:38:30 crc kubenswrapper[4932]: I1125 10:38:30.618033 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee193c35-f11b-4708-8d29-0905452cd59e" path="/var/lib/kubelet/pods/ee193c35-f11b-4708-8d29-0905452cd59e/volumes" Nov 25 10:38:31 crc kubenswrapper[4932]: I1125 10:38:31.407141 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"937396d1-1fdc-4cb2-8897-d7b8b6844208","Type":"ContainerStarted","Data":"c986f96b1379531faf132acd303f2fc9339427137d586519ca945d2254573f12"} Nov 25 10:38:32 crc kubenswrapper[4932]: I1125 10:38:32.048637 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-97sj6"] Nov 25 10:38:32 crc kubenswrapper[4932]: I1125 10:38:32.058584 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-97sj6"] Nov 25 10:38:32 crc kubenswrapper[4932]: I1125 10:38:32.417549 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"937396d1-1fdc-4cb2-8897-d7b8b6844208","Type":"ContainerStarted","Data":"a1ff6776d89c2b6cb99304f63351c758891e9e2d2d65c7a556d6acf39cfbfcc1"} Nov 25 10:38:32 crc kubenswrapper[4932]: I1125 10:38:32.618010 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0b67df6-dcee-42ea-a48c-88a17f961dda" path="/var/lib/kubelet/pods/c0b67df6-dcee-42ea-a48c-88a17f961dda/volumes" Nov 25 10:38:33 crc kubenswrapper[4932]: I1125 10:38:33.429203 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"937396d1-1fdc-4cb2-8897-d7b8b6844208","Type":"ContainerStarted","Data":"8ec4ea7890d60fc76938f149e6676f56c5648b5ea80f4936238792a488ce38eb"} Nov 25 10:38:33 crc kubenswrapper[4932]: I1125 10:38:33.605852 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:38:33 crc kubenswrapper[4932]: E1125 10:38:33.606304 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:38:34 crc kubenswrapper[4932]: I1125 10:38:34.483731 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"937396d1-1fdc-4cb2-8897-d7b8b6844208","Type":"ContainerStarted","Data":"915a116ce24fb9a17b1e14c6c26ff92ba895cbbb70bac95a5c54ee05387cde6f"} Nov 25 10:38:34 crc kubenswrapper[4932]: I1125 10:38:34.514140 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.351588253 podStartE2EDuration="5.514116878s" podCreationTimestamp="2025-11-25 10:38:29 +0000 UTC" firstStartedPulling="2025-11-25 10:38:30.365357875 +0000 UTC m=+6570.491387438" lastFinishedPulling="2025-11-25 10:38:33.52788651 +0000 UTC m=+6573.653916063" observedRunningTime="2025-11-25 10:38:34.504565635 +0000 UTC m=+6574.630595198" watchObservedRunningTime="2025-11-25 10:38:34.514116878 +0000 UTC m=+6574.640146441" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.567847 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.589525 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b5f9d786f-p4w87"] Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.591428 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.593484 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.617592 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b5f9d786f-p4w87"] Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.671084 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-config\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.671138 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-ovsdbserver-nb\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.671169 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bklh\" (UniqueName: \"kubernetes.io/projected/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-kube-api-access-9bklh\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.671206 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: 
\"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-openstack-cell1\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.671336 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-dns-svc\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.671381 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-ovsdbserver-sb\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.773726 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-dns-svc\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.773802 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-ovsdbserver-sb\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.773904 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-config\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.773948 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-ovsdbserver-nb\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.773979 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bklh\" (UniqueName: \"kubernetes.io/projected/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-kube-api-access-9bklh\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.774010 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-openstack-cell1\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.775031 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: 
\"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-openstack-cell1\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.775736 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-dns-svc\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.776374 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-ovsdbserver-sb\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.776963 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-config\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.777233 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-ovsdbserver-nb\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.801288 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bklh\" (UniqueName: \"kubernetes.io/projected/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-kube-api-access-9bklh\") pod \"dnsmasq-dns-6b5f9d786f-p4w87\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:36 crc kubenswrapper[4932]: I1125 10:38:36.909576 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:37 crc kubenswrapper[4932]: I1125 10:38:37.435484 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b5f9d786f-p4w87"] Nov 25 10:38:37 crc kubenswrapper[4932]: I1125 10:38:37.510570 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" event={"ID":"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9","Type":"ContainerStarted","Data":"5817f640fa6f2e6f887676cdba8ff0a6c4be9ab968a0781315e5b228c7878385"} Nov 25 10:38:38 crc kubenswrapper[4932]: I1125 10:38:38.521682 4932 generic.go:334] "Generic (PLEG): container finished" podID="4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" containerID="e170c9301035dd3d54a9ee27d539f45d0ba8bf5c7c5253e4b693481e5af93512" exitCode=0 Nov 25 10:38:38 crc kubenswrapper[4932]: I1125 10:38:38.521741 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" event={"ID":"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9","Type":"ContainerDied","Data":"e170c9301035dd3d54a9ee27d539f45d0ba8bf5c7c5253e4b693481e5af93512"} Nov 25 10:38:39 crc kubenswrapper[4932]: I1125 10:38:39.534341 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" event={"ID":"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9","Type":"ContainerStarted","Data":"a28566651023f07fda0a07de6fdfa8be5128e2a910e6167eba6514dded9c2be8"} Nov 25 10:38:39 crc kubenswrapper[4932]: I1125 10:38:39.534863 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:39 crc kubenswrapper[4932]: I1125 10:38:39.554907 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" podStartSLOduration=3.554870193 podStartE2EDuration="3.554870193s" podCreationTimestamp="2025-11-25 10:38:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:38:39.553069991 +0000 UTC m=+6579.679099564" watchObservedRunningTime="2025-11-25 10:38:39.554870193 +0000 UTC m=+6579.680899756" Nov 25 10:38:46 crc kubenswrapper[4932]: I1125 10:38:46.911328 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:38:46 crc kubenswrapper[4932]: I1125 10:38:46.985836 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54d659b679-kgwkm"] Nov 25 10:38:46 crc kubenswrapper[4932]: I1125 10:38:46.987242 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54d659b679-kgwkm" podUID="382b04e3-773c-4858-b325-752d56d78660" containerName="dnsmasq-dns" containerID="cri-o://393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3" gracePeriod=10 Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.118156 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7b5bc648c5-4695l"] Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.120112 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.154438 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b5bc648c5-4695l"] Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.218013 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-ovsdbserver-nb\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.218163 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-dns-svc\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.218221 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrpgn\" (UniqueName: \"kubernetes.io/projected/08944206-f43f-4976-8668-9a2d900c13fa-kube-api-access-wrpgn\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.218238 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-openstack-cell1\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.218261 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-config\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.218325 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-ovsdbserver-sb\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.323768 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-config\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.323928 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-ovsdbserver-sb\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.323997 4932 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-ovsdbserver-nb\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.324152 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-dns-svc\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.324349 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrpgn\" (UniqueName: \"kubernetes.io/projected/08944206-f43f-4976-8668-9a2d900c13fa-kube-api-access-wrpgn\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.324395 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-openstack-cell1\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.324941 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-config\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.325451 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-dns-svc\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.325455 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-ovsdbserver-nb\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.334014 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-ovsdbserver-sb\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.334126 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/08944206-f43f-4976-8668-9a2d900c13fa-openstack-cell1\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.364377 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrpgn\" (UniqueName: 
\"kubernetes.io/projected/08944206-f43f-4976-8668-9a2d900c13fa-kube-api-access-wrpgn\") pod \"dnsmasq-dns-7b5bc648c5-4695l\" (UID: \"08944206-f43f-4976-8668-9a2d900c13fa\") " pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.481659 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.501841 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54d659b679-kgwkm" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.615557 4932 generic.go:334] "Generic (PLEG): container finished" podID="382b04e3-773c-4858-b325-752d56d78660" containerID="393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3" exitCode=0 Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.615601 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54d659b679-kgwkm" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.615622 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54d659b679-kgwkm" event={"ID":"382b04e3-773c-4858-b325-752d56d78660","Type":"ContainerDied","Data":"393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3"} Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.616215 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54d659b679-kgwkm" event={"ID":"382b04e3-773c-4858-b325-752d56d78660","Type":"ContainerDied","Data":"521ddab3d9c93ae3430ed0941019a182c00236a9859bbb3feb3c2e72043eabb1"} Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.616264 4932 scope.go:117] "RemoveContainer" containerID="393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.634717 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9zkn\" (UniqueName: \"kubernetes.io/projected/382b04e3-773c-4858-b325-752d56d78660-kube-api-access-q9zkn\") pod \"382b04e3-773c-4858-b325-752d56d78660\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.634841 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-config\") pod \"382b04e3-773c-4858-b325-752d56d78660\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.635066 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-ovsdbserver-sb\") pod \"382b04e3-773c-4858-b325-752d56d78660\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.635129 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-dns-svc\") pod \"382b04e3-773c-4858-b325-752d56d78660\" (UID: \"382b04e3-773c-4858-b325-752d56d78660\") " Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.635185 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-ovsdbserver-nb\") pod \"382b04e3-773c-4858-b325-752d56d78660\" (UID: 
\"382b04e3-773c-4858-b325-752d56d78660\") " Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.643392 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/382b04e3-773c-4858-b325-752d56d78660-kube-api-access-q9zkn" (OuterVolumeSpecName: "kube-api-access-q9zkn") pod "382b04e3-773c-4858-b325-752d56d78660" (UID: "382b04e3-773c-4858-b325-752d56d78660"). InnerVolumeSpecName "kube-api-access-q9zkn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.646932 4932 scope.go:117] "RemoveContainer" containerID="885ae74df93c3614d983aff5ac3985c589ec024d4ad30c81f40b900caf74183f" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.697744 4932 scope.go:117] "RemoveContainer" containerID="393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3" Nov 25 10:38:47 crc kubenswrapper[4932]: E1125 10:38:47.698498 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3\": container with ID starting with 393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3 not found: ID does not exist" containerID="393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.698540 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3"} err="failed to get container status \"393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3\": rpc error: code = NotFound desc = could not find container \"393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3\": container with ID starting with 393374797cc91c3ce2e354d804f30bff100fcba4dc0432667a803b1c940e76c3 not found: ID does not exist" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.698560 4932 scope.go:117] "RemoveContainer" containerID="885ae74df93c3614d983aff5ac3985c589ec024d4ad30c81f40b900caf74183f" Nov 25 10:38:47 crc kubenswrapper[4932]: E1125 10:38:47.698950 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"885ae74df93c3614d983aff5ac3985c589ec024d4ad30c81f40b900caf74183f\": container with ID starting with 885ae74df93c3614d983aff5ac3985c589ec024d4ad30c81f40b900caf74183f not found: ID does not exist" containerID="885ae74df93c3614d983aff5ac3985c589ec024d4ad30c81f40b900caf74183f" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.698971 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"885ae74df93c3614d983aff5ac3985c589ec024d4ad30c81f40b900caf74183f"} err="failed to get container status \"885ae74df93c3614d983aff5ac3985c589ec024d4ad30c81f40b900caf74183f\": rpc error: code = NotFound desc = could not find container \"885ae74df93c3614d983aff5ac3985c589ec024d4ad30c81f40b900caf74183f\": container with ID starting with 885ae74df93c3614d983aff5ac3985c589ec024d4ad30c81f40b900caf74183f not found: ID does not exist" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.705828 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-config" (OuterVolumeSpecName: "config") pod "382b04e3-773c-4858-b325-752d56d78660" (UID: "382b04e3-773c-4858-b325-752d56d78660"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.731994 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "382b04e3-773c-4858-b325-752d56d78660" (UID: "382b04e3-773c-4858-b325-752d56d78660"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.738262 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.738563 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9zkn\" (UniqueName: \"kubernetes.io/projected/382b04e3-773c-4858-b325-752d56d78660-kube-api-access-q9zkn\") on node \"crc\" DevicePath \"\"" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.738656 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.744016 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "382b04e3-773c-4858-b325-752d56d78660" (UID: "382b04e3-773c-4858-b325-752d56d78660"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.801579 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "382b04e3-773c-4858-b325-752d56d78660" (UID: "382b04e3-773c-4858-b325-752d56d78660"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.841144 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.841248 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/382b04e3-773c-4858-b325-752d56d78660-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.951439 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54d659b679-kgwkm"] Nov 25 10:38:47 crc kubenswrapper[4932]: I1125 10:38:47.960888 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54d659b679-kgwkm"] Nov 25 10:38:48 crc kubenswrapper[4932]: W1125 10:38:48.012877 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod08944206_f43f_4976_8668_9a2d900c13fa.slice/crio-674e4e04602edfaf707b9f939b599655ed58c8b42f0224631405f54094b982a2 WatchSource:0}: Error finding container 674e4e04602edfaf707b9f939b599655ed58c8b42f0224631405f54094b982a2: Status 404 returned error can't find the container with id 674e4e04602edfaf707b9f939b599655ed58c8b42f0224631405f54094b982a2 Nov 25 10:38:48 crc kubenswrapper[4932]: I1125 10:38:48.015362 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b5bc648c5-4695l"] Nov 25 10:38:48 crc kubenswrapper[4932]: I1125 10:38:48.606424 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:38:48 crc kubenswrapper[4932]: E1125 10:38:48.607062 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:38:48 crc kubenswrapper[4932]: I1125 10:38:48.620770 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="382b04e3-773c-4858-b325-752d56d78660" path="/var/lib/kubelet/pods/382b04e3-773c-4858-b325-752d56d78660/volumes" Nov 25 10:38:48 crc kubenswrapper[4932]: I1125 10:38:48.628982 4932 generic.go:334] "Generic (PLEG): container finished" podID="08944206-f43f-4976-8668-9a2d900c13fa" containerID="9a2f62eb7a491a496471b80164633574237977cfe6ae36dbd003200858c11f68" exitCode=0 Nov 25 10:38:48 crc kubenswrapper[4932]: I1125 10:38:48.629031 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" event={"ID":"08944206-f43f-4976-8668-9a2d900c13fa","Type":"ContainerDied","Data":"9a2f62eb7a491a496471b80164633574237977cfe6ae36dbd003200858c11f68"} Nov 25 10:38:48 crc kubenswrapper[4932]: I1125 10:38:48.629061 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" event={"ID":"08944206-f43f-4976-8668-9a2d900c13fa","Type":"ContainerStarted","Data":"674e4e04602edfaf707b9f939b599655ed58c8b42f0224631405f54094b982a2"} Nov 25 10:38:49 crc kubenswrapper[4932]: I1125 10:38:49.645709 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" event={"ID":"08944206-f43f-4976-8668-9a2d900c13fa","Type":"ContainerStarted","Data":"5dcbf840dc6b1564882bbc91ab5ae6776693fd874b5c0147c2cacb94232300fa"} Nov 25 10:38:49 crc kubenswrapper[4932]: I1125 10:38:49.647427 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:49 crc kubenswrapper[4932]: I1125 10:38:49.673700 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" podStartSLOduration=2.673676478 podStartE2EDuration="2.673676478s" podCreationTimestamp="2025-11-25 10:38:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:38:49.662719575 +0000 UTC m=+6589.788749148" watchObservedRunningTime="2025-11-25 10:38:49.673676478 +0000 UTC m=+6589.799706041" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.027715 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn"] Nov 25 10:38:53 crc kubenswrapper[4932]: E1125 10:38:53.028719 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="382b04e3-773c-4858-b325-752d56d78660" containerName="init" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.028738 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="382b04e3-773c-4858-b325-752d56d78660" containerName="init" Nov 25 10:38:53 crc kubenswrapper[4932]: E1125 10:38:53.028771 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="382b04e3-773c-4858-b325-752d56d78660" containerName="dnsmasq-dns" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.028779 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="382b04e3-773c-4858-b325-752d56d78660" containerName="dnsmasq-dns" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.029041 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="382b04e3-773c-4858-b325-752d56d78660" containerName="dnsmasq-dns" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.029923 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.031929 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.032518 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.032827 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.033153 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.052534 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn"] Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.158735 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.159022 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4c7s4\" (UniqueName: \"kubernetes.io/projected/adad7674-5563-4865-ad52-13ac00090e46-kube-api-access-4c7s4\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.159124 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.159499 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.261972 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4c7s4\" (UniqueName: \"kubernetes.io/projected/adad7674-5563-4865-ad52-13ac00090e46-kube-api-access-4c7s4\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.262035 4932 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.262125 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.262179 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.268028 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.268410 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.268949 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.283414 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4c7s4\" (UniqueName: \"kubernetes.io/projected/adad7674-5563-4865-ad52-13ac00090e46-kube-api-access-4c7s4\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:53 crc kubenswrapper[4932]: I1125 10:38:53.349815 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:38:54 crc kubenswrapper[4932]: W1125 10:38:54.157299 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podadad7674_5563_4865_ad52_13ac00090e46.slice/crio-d5fcd6fb16ea1dfe141bc0649a905ab7f24b1486fe5f0d8f50f1a90a9f7a481f WatchSource:0}: Error finding container d5fcd6fb16ea1dfe141bc0649a905ab7f24b1486fe5f0d8f50f1a90a9f7a481f: Status 404 returned error can't find the container with id d5fcd6fb16ea1dfe141bc0649a905ab7f24b1486fe5f0d8f50f1a90a9f7a481f Nov 25 10:38:54 crc kubenswrapper[4932]: I1125 10:38:54.159073 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn"] Nov 25 10:38:54 crc kubenswrapper[4932]: I1125 10:38:54.699776 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" event={"ID":"adad7674-5563-4865-ad52-13ac00090e46","Type":"ContainerStarted","Data":"d5fcd6fb16ea1dfe141bc0649a905ab7f24b1486fe5f0d8f50f1a90a9f7a481f"} Nov 25 10:38:57 crc kubenswrapper[4932]: I1125 10:38:57.483585 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7b5bc648c5-4695l" Nov 25 10:38:57 crc kubenswrapper[4932]: I1125 10:38:57.559873 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b5f9d786f-p4w87"] Nov 25 10:38:57 crc kubenswrapper[4932]: I1125 10:38:57.560428 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" podUID="4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" containerName="dnsmasq-dns" containerID="cri-o://a28566651023f07fda0a07de6fdfa8be5128e2a910e6167eba6514dded9c2be8" gracePeriod=10 Nov 25 10:38:57 crc kubenswrapper[4932]: I1125 10:38:57.738737 4932 generic.go:334] "Generic (PLEG): container finished" podID="4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" containerID="a28566651023f07fda0a07de6fdfa8be5128e2a910e6167eba6514dded9c2be8" exitCode=0 Nov 25 10:38:57 crc kubenswrapper[4932]: I1125 10:38:57.738778 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" event={"ID":"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9","Type":"ContainerDied","Data":"a28566651023f07fda0a07de6fdfa8be5128e2a910e6167eba6514dded9c2be8"} Nov 25 10:39:01 crc kubenswrapper[4932]: I1125 10:39:01.910456 4932 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" podUID="4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.166:5353: connect: connection refused" Nov 25 10:39:02 crc kubenswrapper[4932]: I1125 10:39:02.607465 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:39:02 crc kubenswrapper[4932]: E1125 10:39:02.608042 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.755813 4932 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.814380 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-ovsdbserver-nb\") pod \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.814472 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-config\") pod \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.814556 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-openstack-cell1\") pod \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.814592 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-dns-svc\") pod \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.814617 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bklh\" (UniqueName: \"kubernetes.io/projected/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-kube-api-access-9bklh\") pod \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.815541 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-ovsdbserver-sb\") pod \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\" (UID: \"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9\") " Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.825901 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-kube-api-access-9bklh" (OuterVolumeSpecName: "kube-api-access-9bklh") pod "4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" (UID: "4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9"). InnerVolumeSpecName "kube-api-access-9bklh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.837812 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" event={"ID":"4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9","Type":"ContainerDied","Data":"5817f640fa6f2e6f887676cdba8ff0a6c4be9ab968a0781315e5b228c7878385"} Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.837872 4932 scope.go:117] "RemoveContainer" containerID="a28566651023f07fda0a07de6fdfa8be5128e2a910e6167eba6514dded9c2be8" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.838498 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b5f9d786f-p4w87" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.875323 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" (UID: "4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.875977 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-config" (OuterVolumeSpecName: "config") pod "4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" (UID: "4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.889895 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" (UID: "4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.890297 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-openstack-cell1" (OuterVolumeSpecName: "openstack-cell1") pod "4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" (UID: "4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9"). InnerVolumeSpecName "openstack-cell1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.899613 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" (UID: "4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.918268 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.918293 4932 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.918304 4932 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-config\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.918314 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.918324 4932 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:03 crc kubenswrapper[4932]: I1125 10:39:03.918333 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bklh\" (UniqueName: \"kubernetes.io/projected/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9-kube-api-access-9bklh\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:04 crc kubenswrapper[4932]: I1125 10:39:04.194234 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b5f9d786f-p4w87"] Nov 25 10:39:04 crc kubenswrapper[4932]: I1125 10:39:04.203384 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b5f9d786f-p4w87"] Nov 25 10:39:04 crc kubenswrapper[4932]: I1125 10:39:04.506722 4932 scope.go:117] "RemoveContainer" containerID="e170c9301035dd3d54a9ee27d539f45d0ba8bf5c7c5253e4b693481e5af93512" Nov 25 10:39:04 crc kubenswrapper[4932]: I1125 10:39:04.538027 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:39:04 crc kubenswrapper[4932]: I1125 10:39:04.633116 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" path="/var/lib/kubelet/pods/4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9/volumes" Nov 25 10:39:05 crc kubenswrapper[4932]: I1125 10:39:05.875669 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" event={"ID":"adad7674-5563-4865-ad52-13ac00090e46","Type":"ContainerStarted","Data":"f8f090aad07628125abdfd7c7474c86c0a4bcbf13cae9a1b53e111667146b4d7"} Nov 25 10:39:05 crc kubenswrapper[4932]: I1125 10:39:05.894890 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" podStartSLOduration=2.519102815 podStartE2EDuration="12.894867963s" podCreationTimestamp="2025-11-25 10:38:53 +0000 UTC" firstStartedPulling="2025-11-25 10:38:54.159931728 +0000 UTC m=+6594.285961301" lastFinishedPulling="2025-11-25 10:39:04.535696876 +0000 UTC m=+6604.661726449" observedRunningTime="2025-11-25 10:39:05.891879188 +0000 UTC m=+6606.017908751" watchObservedRunningTime="2025-11-25 
10:39:05.894867963 +0000 UTC m=+6606.020897546" Nov 25 10:39:16 crc kubenswrapper[4932]: I1125 10:39:16.606623 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:39:16 crc kubenswrapper[4932]: E1125 10:39:16.607163 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:39:17 crc kubenswrapper[4932]: I1125 10:39:17.528177 4932 scope.go:117] "RemoveContainer" containerID="2219ca4cd60852dcc5bf4c82697b810c23526082cb017dd46454887bdb3efafe" Nov 25 10:39:17 crc kubenswrapper[4932]: I1125 10:39:17.994692 4932 generic.go:334] "Generic (PLEG): container finished" podID="adad7674-5563-4865-ad52-13ac00090e46" containerID="f8f090aad07628125abdfd7c7474c86c0a4bcbf13cae9a1b53e111667146b4d7" exitCode=0 Nov 25 10:39:17 crc kubenswrapper[4932]: I1125 10:39:17.994737 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" event={"ID":"adad7674-5563-4865-ad52-13ac00090e46","Type":"ContainerDied","Data":"f8f090aad07628125abdfd7c7474c86c0a4bcbf13cae9a1b53e111667146b4d7"} Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.471775 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.561749 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4c7s4\" (UniqueName: \"kubernetes.io/projected/adad7674-5563-4865-ad52-13ac00090e46-kube-api-access-4c7s4\") pod \"adad7674-5563-4865-ad52-13ac00090e46\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.561946 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-inventory\") pod \"adad7674-5563-4865-ad52-13ac00090e46\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.562072 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-ssh-key\") pod \"adad7674-5563-4865-ad52-13ac00090e46\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.562423 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-pre-adoption-validation-combined-ca-bundle\") pod \"adad7674-5563-4865-ad52-13ac00090e46\" (UID: \"adad7674-5563-4865-ad52-13ac00090e46\") " Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.567349 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/adad7674-5563-4865-ad52-13ac00090e46-kube-api-access-4c7s4" (OuterVolumeSpecName: "kube-api-access-4c7s4") pod "adad7674-5563-4865-ad52-13ac00090e46" (UID: "adad7674-5563-4865-ad52-13ac00090e46"). 
InnerVolumeSpecName "kube-api-access-4c7s4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.567497 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "adad7674-5563-4865-ad52-13ac00090e46" (UID: "adad7674-5563-4865-ad52-13ac00090e46"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.596454 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-inventory" (OuterVolumeSpecName: "inventory") pod "adad7674-5563-4865-ad52-13ac00090e46" (UID: "adad7674-5563-4865-ad52-13ac00090e46"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.609342 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "adad7674-5563-4865-ad52-13ac00090e46" (UID: "adad7674-5563-4865-ad52-13ac00090e46"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.666012 4932 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.666053 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4c7s4\" (UniqueName: \"kubernetes.io/projected/adad7674-5563-4865-ad52-13ac00090e46-kube-api-access-4c7s4\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.666066 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:19 crc kubenswrapper[4932]: I1125 10:39:19.666077 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/adad7674-5563-4865-ad52-13ac00090e46-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:39:20 crc kubenswrapper[4932]: I1125 10:39:20.011667 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" event={"ID":"adad7674-5563-4865-ad52-13ac00090e46","Type":"ContainerDied","Data":"d5fcd6fb16ea1dfe141bc0649a905ab7f24b1486fe5f0d8f50f1a90a9f7a481f"} Nov 25 10:39:20 crc kubenswrapper[4932]: I1125 10:39:20.012023 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d5fcd6fb16ea1dfe141bc0649a905ab7f24b1486fe5f0d8f50f1a90a9f7a481f" Nov 25 10:39:20 crc kubenswrapper[4932]: I1125 10:39:20.011782 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-ct2bvn" Nov 25 10:39:29 crc kubenswrapper[4932]: I1125 10:39:29.606872 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:39:29 crc kubenswrapper[4932]: E1125 10:39:29.608151 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.583631 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9"] Nov 25 10:39:30 crc kubenswrapper[4932]: E1125 10:39:30.584381 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" containerName="dnsmasq-dns" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.584402 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" containerName="dnsmasq-dns" Nov 25 10:39:30 crc kubenswrapper[4932]: E1125 10:39:30.584431 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adad7674-5563-4865-ad52-13ac00090e46" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.584441 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="adad7674-5563-4865-ad52-13ac00090e46" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 25 10:39:30 crc kubenswrapper[4932]: E1125 10:39:30.584463 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" containerName="init" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.584471 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" containerName="init" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.585767 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4200d3ad-bbeb-4e0c-9052-71f60a4d9ea9" containerName="dnsmasq-dns" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.585811 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="adad7674-5563-4865-ad52-13ac00090e46" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.586706 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.588723 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.589897 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.590310 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.590364 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.603978 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.605452 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.606055 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6h7t8\" (UniqueName: \"kubernetes.io/projected/87b34be8-c901-488e-b049-a745b41c53c7-kube-api-access-6h7t8\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.609385 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.635052 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9"] Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.711716 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.711785 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6h7t8\" (UniqueName: 
\"kubernetes.io/projected/87b34be8-c901-488e-b049-a745b41c53c7-kube-api-access-6h7t8\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.711813 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.711890 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.717794 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.717678 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.717857 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.729550 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6h7t8\" (UniqueName: \"kubernetes.io/projected/87b34be8-c901-488e-b049-a745b41c53c7-kube-api-access-6h7t8\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:30 crc kubenswrapper[4932]: I1125 10:39:30.914308 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:39:31 crc kubenswrapper[4932]: I1125 10:39:31.452792 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9"] Nov 25 10:39:32 crc kubenswrapper[4932]: I1125 10:39:32.126075 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" event={"ID":"87b34be8-c901-488e-b049-a745b41c53c7","Type":"ContainerStarted","Data":"97d3332844f14ea6305fb61c16e8846044a3f6529e5fa3f9f1c29dcfd8caf281"} Nov 25 10:39:33 crc kubenswrapper[4932]: I1125 10:39:33.152450 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" event={"ID":"87b34be8-c901-488e-b049-a745b41c53c7","Type":"ContainerStarted","Data":"cfaddc31a5ce3ba0e1a6d565fd15df7b3658b2cf8286daba1063f787c684ba73"} Nov 25 10:39:33 crc kubenswrapper[4932]: I1125 10:39:33.193818 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" podStartSLOduration=2.727351215 podStartE2EDuration="3.193792991s" podCreationTimestamp="2025-11-25 10:39:30 +0000 UTC" firstStartedPulling="2025-11-25 10:39:31.465709197 +0000 UTC m=+6631.591738760" lastFinishedPulling="2025-11-25 10:39:31.932150973 +0000 UTC m=+6632.058180536" observedRunningTime="2025-11-25 10:39:33.183895208 +0000 UTC m=+6633.309924771" watchObservedRunningTime="2025-11-25 10:39:33.193792991 +0000 UTC m=+6633.319822554" Nov 25 10:39:40 crc kubenswrapper[4932]: I1125 10:39:40.614036 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1" Nov 25 10:39:40 crc kubenswrapper[4932]: E1125 10:39:40.614734 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:39:42 crc kubenswrapper[4932]: I1125 10:39:42.732167 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8p7r8"] Nov 25 10:39:42 crc kubenswrapper[4932]: I1125 10:39:42.735494 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8p7r8" Nov 25 10:39:42 crc kubenswrapper[4932]: I1125 10:39:42.748666 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8p7r8"] Nov 25 10:39:42 crc kubenswrapper[4932]: I1125 10:39:42.863423 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/277022dd-c2da-4c3f-893a-bb7e355a29bd-utilities\") pod \"redhat-operators-8p7r8\" (UID: \"277022dd-c2da-4c3f-893a-bb7e355a29bd\") " pod="openshift-marketplace/redhat-operators-8p7r8" Nov 25 10:39:42 crc kubenswrapper[4932]: I1125 10:39:42.863813 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xttns\" (UniqueName: \"kubernetes.io/projected/277022dd-c2da-4c3f-893a-bb7e355a29bd-kube-api-access-xttns\") pod \"redhat-operators-8p7r8\" (UID: \"277022dd-c2da-4c3f-893a-bb7e355a29bd\") " pod="openshift-marketplace/redhat-operators-8p7r8" Nov 25 10:39:42 crc kubenswrapper[4932]: I1125 10:39:42.863943 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/277022dd-c2da-4c3f-893a-bb7e355a29bd-catalog-content\") pod \"redhat-operators-8p7r8\" (UID: \"277022dd-c2da-4c3f-893a-bb7e355a29bd\") " pod="openshift-marketplace/redhat-operators-8p7r8" Nov 25 10:39:42 crc kubenswrapper[4932]: I1125 10:39:42.965746 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xttns\" (UniqueName: \"kubernetes.io/projected/277022dd-c2da-4c3f-893a-bb7e355a29bd-kube-api-access-xttns\") pod \"redhat-operators-8p7r8\" (UID: \"277022dd-c2da-4c3f-893a-bb7e355a29bd\") " pod="openshift-marketplace/redhat-operators-8p7r8" Nov 25 10:39:42 crc kubenswrapper[4932]: I1125 10:39:42.965835 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/277022dd-c2da-4c3f-893a-bb7e355a29bd-catalog-content\") pod \"redhat-operators-8p7r8\" (UID: \"277022dd-c2da-4c3f-893a-bb7e355a29bd\") " pod="openshift-marketplace/redhat-operators-8p7r8" Nov 25 10:39:42 crc kubenswrapper[4932]: I1125 10:39:42.965918 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/277022dd-c2da-4c3f-893a-bb7e355a29bd-utilities\") pod \"redhat-operators-8p7r8\" (UID: \"277022dd-c2da-4c3f-893a-bb7e355a29bd\") " pod="openshift-marketplace/redhat-operators-8p7r8" Nov 25 10:39:42 crc kubenswrapper[4932]: I1125 10:39:42.966506 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/277022dd-c2da-4c3f-893a-bb7e355a29bd-catalog-content\") pod \"redhat-operators-8p7r8\" (UID: \"277022dd-c2da-4c3f-893a-bb7e355a29bd\") " pod="openshift-marketplace/redhat-operators-8p7r8" Nov 25 10:39:42 crc kubenswrapper[4932]: I1125 10:39:42.966584 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/277022dd-c2da-4c3f-893a-bb7e355a29bd-utilities\") pod \"redhat-operators-8p7r8\" (UID: \"277022dd-c2da-4c3f-893a-bb7e355a29bd\") " pod="openshift-marketplace/redhat-operators-8p7r8" Nov 25 10:39:42 crc kubenswrapper[4932]: I1125 10:39:42.988360 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-xttns\" (UniqueName: \"kubernetes.io/projected/277022dd-c2da-4c3f-893a-bb7e355a29bd-kube-api-access-xttns\") pod \"redhat-operators-8p7r8\" (UID: \"277022dd-c2da-4c3f-893a-bb7e355a29bd\") " pod="openshift-marketplace/redhat-operators-8p7r8" Nov 25 10:39:43 crc kubenswrapper[4932]: I1125 10:39:43.069419 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8p7r8" Nov 25 10:39:43 crc kubenswrapper[4932]: I1125 10:39:43.541570 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8p7r8"] Nov 25 10:39:43 crc kubenswrapper[4932]: W1125 10:39:43.561476 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod277022dd_c2da_4c3f_893a_bb7e355a29bd.slice/crio-60d47979455ba002eea226628df26c4b9e2244627b9415e90fd065d15bfb86a0 WatchSource:0}: Error finding container 60d47979455ba002eea226628df26c4b9e2244627b9415e90fd065d15bfb86a0: Status 404 returned error can't find the container with id 60d47979455ba002eea226628df26c4b9e2244627b9415e90fd065d15bfb86a0 Nov 25 10:39:44 crc kubenswrapper[4932]: I1125 10:39:44.272227 4932 generic.go:334] "Generic (PLEG): container finished" podID="277022dd-c2da-4c3f-893a-bb7e355a29bd" containerID="7efa8a050900f1147000ed6ee9bc36434de6ccff93de1c4dfd89728cced33547" exitCode=0 Nov 25 10:39:44 crc kubenswrapper[4932]: I1125 10:39:44.272543 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p7r8" event={"ID":"277022dd-c2da-4c3f-893a-bb7e355a29bd","Type":"ContainerDied","Data":"7efa8a050900f1147000ed6ee9bc36434de6ccff93de1c4dfd89728cced33547"} Nov 25 10:39:44 crc kubenswrapper[4932]: I1125 10:39:44.272572 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p7r8" event={"ID":"277022dd-c2da-4c3f-893a-bb7e355a29bd","Type":"ContainerStarted","Data":"60d47979455ba002eea226628df26c4b9e2244627b9415e90fd065d15bfb86a0"} Nov 25 10:39:46 crc kubenswrapper[4932]: I1125 10:39:46.296545 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p7r8" event={"ID":"277022dd-c2da-4c3f-893a-bb7e355a29bd","Type":"ContainerStarted","Data":"a7cbd6340830c5020caf8f5edcc7761a10bd7263e56a092b71b265f4b25711de"} Nov 25 10:39:51 crc kubenswrapper[4932]: I1125 10:39:51.344635 4932 generic.go:334] "Generic (PLEG): container finished" podID="277022dd-c2da-4c3f-893a-bb7e355a29bd" containerID="a7cbd6340830c5020caf8f5edcc7761a10bd7263e56a092b71b265f4b25711de" exitCode=0 Nov 25 10:39:51 crc kubenswrapper[4932]: I1125 10:39:51.344710 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p7r8" event={"ID":"277022dd-c2da-4c3f-893a-bb7e355a29bd","Type":"ContainerDied","Data":"a7cbd6340830c5020caf8f5edcc7761a10bd7263e56a092b71b265f4b25711de"} Nov 25 10:39:52 crc kubenswrapper[4932]: I1125 10:39:52.358239 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p7r8" event={"ID":"277022dd-c2da-4c3f-893a-bb7e355a29bd","Type":"ContainerStarted","Data":"a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88"} Nov 25 10:39:52 crc kubenswrapper[4932]: I1125 10:39:52.381843 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8p7r8" podStartSLOduration=2.867839184 podStartE2EDuration="10.381823782s" 
podCreationTimestamp="2025-11-25 10:39:42 +0000 UTC" firstStartedPulling="2025-11-25 10:39:44.274146818 +0000 UTC m=+6644.400176381" lastFinishedPulling="2025-11-25 10:39:51.788131416 +0000 UTC m=+6651.914160979" observedRunningTime="2025-11-25 10:39:52.379421033 +0000 UTC m=+6652.505450616" watchObservedRunningTime="2025-11-25 10:39:52.381823782 +0000 UTC m=+6652.507853355"
Nov 25 10:39:53 crc kubenswrapper[4932]: I1125 10:39:53.070457 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8p7r8"
Nov 25 10:39:53 crc kubenswrapper[4932]: I1125 10:39:53.070927 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8p7r8"
Nov 25 10:39:54 crc kubenswrapper[4932]: I1125 10:39:54.123951 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8p7r8" podUID="277022dd-c2da-4c3f-893a-bb7e355a29bd" containerName="registry-server" probeResult="failure" output=<
Nov 25 10:39:54 crc kubenswrapper[4932]: timeout: failed to connect service ":50051" within 1s
Nov 25 10:39:54 crc kubenswrapper[4932]: >
Nov 25 10:39:55 crc kubenswrapper[4932]: I1125 10:39:55.606408 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"
Nov 25 10:39:55 crc kubenswrapper[4932]: E1125 10:39:55.607102 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:40:03 crc kubenswrapper[4932]: I1125 10:40:03.125814 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8p7r8"
Nov 25 10:40:03 crc kubenswrapper[4932]: I1125 10:40:03.186250 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8p7r8"
Nov 25 10:40:03 crc kubenswrapper[4932]: I1125 10:40:03.366302 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8p7r8"]
Nov 25 10:40:04 crc kubenswrapper[4932]: I1125 10:40:04.486365 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8p7r8" podUID="277022dd-c2da-4c3f-893a-bb7e355a29bd" containerName="registry-server" containerID="cri-o://a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88" gracePeriod=2
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.482622 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8p7r8"
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.497050 4932 generic.go:334] "Generic (PLEG): container finished" podID="277022dd-c2da-4c3f-893a-bb7e355a29bd" containerID="a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88" exitCode=0
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.497091 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8p7r8"
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.497095 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p7r8" event={"ID":"277022dd-c2da-4c3f-893a-bb7e355a29bd","Type":"ContainerDied","Data":"a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88"}
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.497123 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8p7r8" event={"ID":"277022dd-c2da-4c3f-893a-bb7e355a29bd","Type":"ContainerDied","Data":"60d47979455ba002eea226628df26c4b9e2244627b9415e90fd065d15bfb86a0"}
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.497142 4932 scope.go:117] "RemoveContainer" containerID="a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88"
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.534006 4932 scope.go:117] "RemoveContainer" containerID="a7cbd6340830c5020caf8f5edcc7761a10bd7263e56a092b71b265f4b25711de"
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.553708 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/277022dd-c2da-4c3f-893a-bb7e355a29bd-catalog-content\") pod \"277022dd-c2da-4c3f-893a-bb7e355a29bd\" (UID: \"277022dd-c2da-4c3f-893a-bb7e355a29bd\") "
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.553855 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/277022dd-c2da-4c3f-893a-bb7e355a29bd-utilities\") pod \"277022dd-c2da-4c3f-893a-bb7e355a29bd\" (UID: \"277022dd-c2da-4c3f-893a-bb7e355a29bd\") "
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.553927 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xttns\" (UniqueName: \"kubernetes.io/projected/277022dd-c2da-4c3f-893a-bb7e355a29bd-kube-api-access-xttns\") pod \"277022dd-c2da-4c3f-893a-bb7e355a29bd\" (UID: \"277022dd-c2da-4c3f-893a-bb7e355a29bd\") "
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.555507 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/277022dd-c2da-4c3f-893a-bb7e355a29bd-utilities" (OuterVolumeSpecName: "utilities") pod "277022dd-c2da-4c3f-893a-bb7e355a29bd" (UID: "277022dd-c2da-4c3f-893a-bb7e355a29bd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.561474 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/277022dd-c2da-4c3f-893a-bb7e355a29bd-kube-api-access-xttns" (OuterVolumeSpecName: "kube-api-access-xttns") pod "277022dd-c2da-4c3f-893a-bb7e355a29bd" (UID: "277022dd-c2da-4c3f-893a-bb7e355a29bd"). InnerVolumeSpecName "kube-api-access-xttns". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.569762 4932 scope.go:117] "RemoveContainer" containerID="7efa8a050900f1147000ed6ee9bc36434de6ccff93de1c4dfd89728cced33547"
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.654636 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/277022dd-c2da-4c3f-893a-bb7e355a29bd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "277022dd-c2da-4c3f-893a-bb7e355a29bd" (UID: "277022dd-c2da-4c3f-893a-bb7e355a29bd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.656488 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/277022dd-c2da-4c3f-893a-bb7e355a29bd-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.656543 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/277022dd-c2da-4c3f-893a-bb7e355a29bd-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.656566 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xttns\" (UniqueName: \"kubernetes.io/projected/277022dd-c2da-4c3f-893a-bb7e355a29bd-kube-api-access-xttns\") on node \"crc\" DevicePath \"\""
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.665218 4932 scope.go:117] "RemoveContainer" containerID="a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88"
Nov 25 10:40:05 crc kubenswrapper[4932]: E1125 10:40:05.665684 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88\": container with ID starting with a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88 not found: ID does not exist" containerID="a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88"
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.665722 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88"} err="failed to get container status \"a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88\": rpc error: code = NotFound desc = could not find container \"a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88\": container with ID starting with a6dce58f98c71df43408e69418d9b560b5c2c5a7a52ddeadcc599d04886b6f88 not found: ID does not exist"
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.665741 4932 scope.go:117] "RemoveContainer" containerID="a7cbd6340830c5020caf8f5edcc7761a10bd7263e56a092b71b265f4b25711de"
Nov 25 10:40:05 crc kubenswrapper[4932]: E1125 10:40:05.666102 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7cbd6340830c5020caf8f5edcc7761a10bd7263e56a092b71b265f4b25711de\": container with ID starting with a7cbd6340830c5020caf8f5edcc7761a10bd7263e56a092b71b265f4b25711de not found: ID does not exist" containerID="a7cbd6340830c5020caf8f5edcc7761a10bd7263e56a092b71b265f4b25711de"
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.666131 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7cbd6340830c5020caf8f5edcc7761a10bd7263e56a092b71b265f4b25711de"} err="failed to get container status \"a7cbd6340830c5020caf8f5edcc7761a10bd7263e56a092b71b265f4b25711de\": rpc error: code = NotFound desc = could not find container \"a7cbd6340830c5020caf8f5edcc7761a10bd7263e56a092b71b265f4b25711de\": container with ID starting with a7cbd6340830c5020caf8f5edcc7761a10bd7263e56a092b71b265f4b25711de not found: ID does not exist"
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.666149 4932 scope.go:117] "RemoveContainer" containerID="7efa8a050900f1147000ed6ee9bc36434de6ccff93de1c4dfd89728cced33547"
Nov 25 10:40:05 crc kubenswrapper[4932]: E1125 10:40:05.666516 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7efa8a050900f1147000ed6ee9bc36434de6ccff93de1c4dfd89728cced33547\": container with ID starting with 7efa8a050900f1147000ed6ee9bc36434de6ccff93de1c4dfd89728cced33547 not found: ID does not exist" containerID="7efa8a050900f1147000ed6ee9bc36434de6ccff93de1c4dfd89728cced33547"
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.666623 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7efa8a050900f1147000ed6ee9bc36434de6ccff93de1c4dfd89728cced33547"} err="failed to get container status \"7efa8a050900f1147000ed6ee9bc36434de6ccff93de1c4dfd89728cced33547\": rpc error: code = NotFound desc = could not find container \"7efa8a050900f1147000ed6ee9bc36434de6ccff93de1c4dfd89728cced33547\": container with ID starting with 7efa8a050900f1147000ed6ee9bc36434de6ccff93de1c4dfd89728cced33547 not found: ID does not exist"
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.831587 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8p7r8"]
Nov 25 10:40:05 crc kubenswrapper[4932]: I1125 10:40:05.839877 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8p7r8"]
Nov 25 10:40:06 crc kubenswrapper[4932]: I1125 10:40:06.619232 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="277022dd-c2da-4c3f-893a-bb7e355a29bd" path="/var/lib/kubelet/pods/277022dd-c2da-4c3f-893a-bb7e355a29bd/volumes"
Nov 25 10:40:08 crc kubenswrapper[4932]: I1125 10:40:08.040448 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-create-jvzv8"]
Nov 25 10:40:08 crc kubenswrapper[4932]: I1125 10:40:08.049973 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-create-jvzv8"]
Nov 25 10:40:08 crc kubenswrapper[4932]: I1125 10:40:08.618266 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb5c2557-70fa-42c9-b3c1-739a5a34a558" path="/var/lib/kubelet/pods/fb5c2557-70fa-42c9-b3c1-739a5a34a558/volumes"
Nov 25 10:40:09 crc kubenswrapper[4932]: I1125 10:40:09.605778 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"
Nov 25 10:40:09 crc kubenswrapper[4932]: E1125 10:40:09.606452 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:40:10 crc kubenswrapper[4932]: I1125 10:40:10.035853 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-f71b-account-create-jpl8d"]
Nov 25 10:40:10 crc kubenswrapper[4932]: I1125 10:40:10.047168 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-f71b-account-create-jpl8d"]
Nov 25 10:40:10 crc kubenswrapper[4932]: I1125 10:40:10.619531 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a7eaed6-9c4a-4070-92b9-438cf5907c7e" path="/var/lib/kubelet/pods/9a7eaed6-9c4a-4070-92b9-438cf5907c7e/volumes"
Nov 25 10:40:15 crc kubenswrapper[4932]: I1125 10:40:15.034004 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-persistence-db-create-pr4w7"]
Nov 25 10:40:15 crc kubenswrapper[4932]: I1125 10:40:15.041725 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-persistence-db-create-pr4w7"]
Nov 25 10:40:16 crc kubenswrapper[4932]: I1125 10:40:16.029408 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-3074-account-create-rlmqt"]
Nov 25 10:40:16 crc kubenswrapper[4932]: I1125 10:40:16.038824 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-3074-account-create-rlmqt"]
Nov 25 10:40:16 crc kubenswrapper[4932]: I1125 10:40:16.618163 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16444bcb-2d63-4893-9ced-b0a5375eb0de" path="/var/lib/kubelet/pods/16444bcb-2d63-4893-9ced-b0a5375eb0de/volumes"
Nov 25 10:40:16 crc kubenswrapper[4932]: I1125 10:40:16.619278 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bac5b437-403a-4a3e-a84e-163690732ab3" path="/var/lib/kubelet/pods/bac5b437-403a-4a3e-a84e-163690732ab3/volumes"
Nov 25 10:40:17 crc kubenswrapper[4932]: I1125 10:40:17.708534 4932 scope.go:117] "RemoveContainer" containerID="4941c3d95449ab909f380a85fedb642939a5ad3dc11f8d150ef802dd61a46216"
Nov 25 10:40:17 crc kubenswrapper[4932]: I1125 10:40:17.732636 4932 scope.go:117] "RemoveContainer" containerID="c6916dc69497bd79d14d72ce57febec6f4a3e3d687336380e228671fa6100620"
Nov 25 10:40:17 crc kubenswrapper[4932]: I1125 10:40:17.784562 4932 scope.go:117] "RemoveContainer" containerID="69196289d0e87f4d54b9f63b017f3883215ba42a950074a71af424266d39e925"
Nov 25 10:40:17 crc kubenswrapper[4932]: I1125 10:40:17.828510 4932 scope.go:117] "RemoveContainer" containerID="248889e36b5e6688aa0c60593add36d74d08b73d7d739712cae25f5b5a0d4520"
Nov 25 10:40:22 crc kubenswrapper[4932]: I1125 10:40:22.606852 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"
Nov 25 10:40:22 crc kubenswrapper[4932]: E1125 10:40:22.607915 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:40:34 crc kubenswrapper[4932]: I1125 10:40:34.607027 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"
Nov 25 10:40:34 crc kubenswrapper[4932]: E1125 10:40:34.608029 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 10:40:45 crc kubenswrapper[4932]: I1125 10:40:45.607770 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"
Nov 25 10:40:45 crc kubenswrapper[4932]: I1125 10:40:45.892524 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"f89f493934976ffb7be7739a40343cc5d02766d169103db7c02e69f490bc4c4c"}
Nov 25 10:40:55 crc kubenswrapper[4932]: I1125 10:40:55.047700 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-sync-dfpz5"]
Nov 25 10:40:55 crc kubenswrapper[4932]: I1125 10:40:55.059130 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-sync-dfpz5"]
Nov 25 10:40:56 crc kubenswrapper[4932]: I1125 10:40:56.618095 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4826971d-c5a6-4c1a-be73-58a94eb21dd9" path="/var/lib/kubelet/pods/4826971d-c5a6-4c1a-be73-58a94eb21dd9/volumes"
Nov 25 10:41:17 crc kubenswrapper[4932]: I1125 10:41:17.973902 4932 scope.go:117] "RemoveContainer" containerID="3a7caf3900268974e4a495aeed77b7e4e31df39ff7b96aa8d049e45540f9720a"
Nov 25 10:41:17 crc kubenswrapper[4932]: I1125 10:41:17.998409 4932 scope.go:117] "RemoveContainer" containerID="3ecf39e075708b3d8717dc54dde1f9b39c6b6404c4d3ca37fc7d6dce77e7707b"
Nov 25 10:43:07 crc kubenswrapper[4932]: I1125 10:43:07.180954 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:43:07 crc kubenswrapper[4932]: I1125 10:43:07.181541 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:43:37 crc kubenswrapper[4932]: I1125 10:43:37.180921 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:43:37 crc kubenswrapper[4932]: I1125 10:43:37.181543 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:44:07 crc kubenswrapper[4932]: I1125 10:44:07.181216 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:44:07 crc kubenswrapper[4932]: I1125 10:44:07.181778 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:44:07 crc kubenswrapper[4932]: I1125 10:44:07.181829 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh"
Nov 25 10:44:07 crc kubenswrapper[4932]: I1125 10:44:07.182686 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f89f493934976ffb7be7739a40343cc5d02766d169103db7c02e69f490bc4c4c"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 10:44:07 crc kubenswrapper[4932]: I1125 10:44:07.182739 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://f89f493934976ffb7be7739a40343cc5d02766d169103db7c02e69f490bc4c4c" gracePeriod=600
Nov 25 10:44:08 crc kubenswrapper[4932]: I1125 10:44:08.123086 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="f89f493934976ffb7be7739a40343cc5d02766d169103db7c02e69f490bc4c4c" exitCode=0
Nov 25 10:44:08 crc kubenswrapper[4932]: I1125 10:44:08.123161 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"f89f493934976ffb7be7739a40343cc5d02766d169103db7c02e69f490bc4c4c"}
Nov 25 10:44:08 crc kubenswrapper[4932]: I1125 10:44:08.123733 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66"}
Nov 25 10:44:08 crc kubenswrapper[4932]: I1125 10:44:08.123771 4932 scope.go:117] "RemoveContainer" containerID="234524e4df96e312823996a42d0fda9b73f914cd2a26688c9d025f42ff9321c1"
Nov 25 10:44:52 crc kubenswrapper[4932]: I1125 10:44:52.042310 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-0352-account-create-tcrpp"]
Nov 25 10:44:52 crc kubenswrapper[4932]: I1125 10:44:52.050620 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-wlqvh"]
Nov 25 10:44:52 crc kubenswrapper[4932]: I1125 10:44:52.059974 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-0352-account-create-tcrpp"]
Nov 25 10:44:52 crc kubenswrapper[4932]: I1125 10:44:52.068741 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-wlqvh"]
Nov 25 10:44:52 crc kubenswrapper[4932]: I1125 10:44:52.618961 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65dc564e-efa5-4236-bcb5-fb93b7b22df6" path="/var/lib/kubelet/pods/65dc564e-efa5-4236-bcb5-fb93b7b22df6/volumes"
Nov 25 10:44:52 crc kubenswrapper[4932]: I1125 10:44:52.619910 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7834cb6-728c-43bb-8de6-47dd0ed632ca" path="/var/lib/kubelet/pods/f7834cb6-728c-43bb-8de6-47dd0ed632ca/volumes"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.167481 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"]
Nov 25 10:45:00 crc kubenswrapper[4932]: E1125 10:45:00.169214 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="277022dd-c2da-4c3f-893a-bb7e355a29bd" containerName="registry-server"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.169241 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="277022dd-c2da-4c3f-893a-bb7e355a29bd" containerName="registry-server"
Nov 25 10:45:00 crc kubenswrapper[4932]: E1125 10:45:00.169287 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="277022dd-c2da-4c3f-893a-bb7e355a29bd" containerName="extract-utilities"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.169295 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="277022dd-c2da-4c3f-893a-bb7e355a29bd" containerName="extract-utilities"
Nov 25 10:45:00 crc kubenswrapper[4932]: E1125 10:45:00.169308 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="277022dd-c2da-4c3f-893a-bb7e355a29bd" containerName="extract-content"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.169316 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="277022dd-c2da-4c3f-893a-bb7e355a29bd" containerName="extract-content"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.169570 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="277022dd-c2da-4c3f-893a-bb7e355a29bd" containerName="registry-server"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.170966 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.173525 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.175121 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.177937 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"]
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.254142 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ef03811f-1312-4771-b2bb-f493f5782caa-secret-volume\") pod \"collect-profiles-29401125-m2jfw\" (UID: \"ef03811f-1312-4771-b2bb-f493f5782caa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.254411 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l967h\" (UniqueName: \"kubernetes.io/projected/ef03811f-1312-4771-b2bb-f493f5782caa-kube-api-access-l967h\") pod \"collect-profiles-29401125-m2jfw\" (UID: \"ef03811f-1312-4771-b2bb-f493f5782caa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.254448 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ef03811f-1312-4771-b2bb-f493f5782caa-config-volume\") pod \"collect-profiles-29401125-m2jfw\" (UID: \"ef03811f-1312-4771-b2bb-f493f5782caa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.356392 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ef03811f-1312-4771-b2bb-f493f5782caa-secret-volume\") pod \"collect-profiles-29401125-m2jfw\" (UID: \"ef03811f-1312-4771-b2bb-f493f5782caa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.356620 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l967h\" (UniqueName: \"kubernetes.io/projected/ef03811f-1312-4771-b2bb-f493f5782caa-kube-api-access-l967h\") pod \"collect-profiles-29401125-m2jfw\" (UID: \"ef03811f-1312-4771-b2bb-f493f5782caa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.356657 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ef03811f-1312-4771-b2bb-f493f5782caa-config-volume\") pod \"collect-profiles-29401125-m2jfw\" (UID: \"ef03811f-1312-4771-b2bb-f493f5782caa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.357892 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ef03811f-1312-4771-b2bb-f493f5782caa-config-volume\") pod \"collect-profiles-29401125-m2jfw\" (UID: \"ef03811f-1312-4771-b2bb-f493f5782caa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.367771 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ef03811f-1312-4771-b2bb-f493f5782caa-secret-volume\") pod \"collect-profiles-29401125-m2jfw\" (UID: \"ef03811f-1312-4771-b2bb-f493f5782caa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.374018 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l967h\" (UniqueName: \"kubernetes.io/projected/ef03811f-1312-4771-b2bb-f493f5782caa-kube-api-access-l967h\") pod \"collect-profiles-29401125-m2jfw\" (UID: \"ef03811f-1312-4771-b2bb-f493f5782caa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:00 crc kubenswrapper[4932]: I1125 10:45:00.501049 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:01 crc kubenswrapper[4932]: I1125 10:45:01.005866 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"]
Nov 25 10:45:01 crc kubenswrapper[4932]: I1125 10:45:01.627031 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw" event={"ID":"ef03811f-1312-4771-b2bb-f493f5782caa","Type":"ContainerStarted","Data":"58fcad50e72360f9145c0fb641fbcaad3f52d00a72ba7d131cd9a1bf6f9eb751"}
Nov 25 10:45:01 crc kubenswrapper[4932]: I1125 10:45:01.627394 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw" event={"ID":"ef03811f-1312-4771-b2bb-f493f5782caa","Type":"ContainerStarted","Data":"92cd8999d0649796be9d5d963b7f366e219a37f2eac32843d3ffa1e8ea132fce"}
Nov 25 10:45:01 crc kubenswrapper[4932]: I1125 10:45:01.644732 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw" podStartSLOduration=1.6447159550000001 podStartE2EDuration="1.644715955s" podCreationTimestamp="2025-11-25 10:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 10:45:01.643778098 +0000 UTC m=+6961.769807661" watchObservedRunningTime="2025-11-25 10:45:01.644715955 +0000 UTC m=+6961.770745518"
Nov 25 10:45:02 crc kubenswrapper[4932]: I1125 10:45:02.639808 4932 generic.go:334] "Generic (PLEG): container finished" podID="ef03811f-1312-4771-b2bb-f493f5782caa" containerID="58fcad50e72360f9145c0fb641fbcaad3f52d00a72ba7d131cd9a1bf6f9eb751" exitCode=0
Nov 25 10:45:02 crc kubenswrapper[4932]: I1125 10:45:02.639862 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw" event={"ID":"ef03811f-1312-4771-b2bb-f493f5782caa","Type":"ContainerDied","Data":"58fcad50e72360f9145c0fb641fbcaad3f52d00a72ba7d131cd9a1bf6f9eb751"}
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.000782 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.142429 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ef03811f-1312-4771-b2bb-f493f5782caa-config-volume\") pod \"ef03811f-1312-4771-b2bb-f493f5782caa\" (UID: \"ef03811f-1312-4771-b2bb-f493f5782caa\") "
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.142555 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ef03811f-1312-4771-b2bb-f493f5782caa-secret-volume\") pod \"ef03811f-1312-4771-b2bb-f493f5782caa\" (UID: \"ef03811f-1312-4771-b2bb-f493f5782caa\") "
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.142761 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l967h\" (UniqueName: \"kubernetes.io/projected/ef03811f-1312-4771-b2bb-f493f5782caa-kube-api-access-l967h\") pod \"ef03811f-1312-4771-b2bb-f493f5782caa\" (UID: \"ef03811f-1312-4771-b2bb-f493f5782caa\") "
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.143284 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef03811f-1312-4771-b2bb-f493f5782caa-config-volume" (OuterVolumeSpecName: "config-volume") pod "ef03811f-1312-4771-b2bb-f493f5782caa" (UID: "ef03811f-1312-4771-b2bb-f493f5782caa"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.143519 4932 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ef03811f-1312-4771-b2bb-f493f5782caa-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.149306 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef03811f-1312-4771-b2bb-f493f5782caa-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ef03811f-1312-4771-b2bb-f493f5782caa" (UID: "ef03811f-1312-4771-b2bb-f493f5782caa"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.149352 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef03811f-1312-4771-b2bb-f493f5782caa-kube-api-access-l967h" (OuterVolumeSpecName: "kube-api-access-l967h") pod "ef03811f-1312-4771-b2bb-f493f5782caa" (UID: "ef03811f-1312-4771-b2bb-f493f5782caa"). InnerVolumeSpecName "kube-api-access-l967h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.245327 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l967h\" (UniqueName: \"kubernetes.io/projected/ef03811f-1312-4771-b2bb-f493f5782caa-kube-api-access-l967h\") on node \"crc\" DevicePath \"\""
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.245377 4932 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ef03811f-1312-4771-b2bb-f493f5782caa-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.664226 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw" event={"ID":"ef03811f-1312-4771-b2bb-f493f5782caa","Type":"ContainerDied","Data":"92cd8999d0649796be9d5d963b7f366e219a37f2eac32843d3ffa1e8ea132fce"}
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.664274 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92cd8999d0649796be9d5d963b7f366e219a37f2eac32843d3ffa1e8ea132fce"
Nov 25 10:45:04 crc kubenswrapper[4932]: I1125 10:45:04.664278 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401125-m2jfw"
Nov 25 10:45:05 crc kubenswrapper[4932]: I1125 10:45:04.714377 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn"]
Nov 25 10:45:05 crc kubenswrapper[4932]: I1125 10:45:04.722444 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401080-ttwfn"]
Nov 25 10:45:06 crc kubenswrapper[4932]: I1125 10:45:06.619706 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cf2e72d-629a-4db8-98e3-685af3ddd0e4" path="/var/lib/kubelet/pods/1cf2e72d-629a-4db8-98e3-685af3ddd0e4/volumes"
Nov 25 10:45:08 crc kubenswrapper[4932]: I1125 10:45:08.033863 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-jz9v2"]
Nov 25 10:45:08 crc kubenswrapper[4932]: I1125 10:45:08.041709 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-jz9v2"]
Nov 25 10:45:08 crc kubenswrapper[4932]: I1125 10:45:08.618858 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53043b1f-cee1-4f76-a0aa-7510ed0f3722" path="/var/lib/kubelet/pods/53043b1f-cee1-4f76-a0aa-7510ed0f3722/volumes"
Nov 25 10:45:18 crc kubenswrapper[4932]: I1125 10:45:18.156788 4932 scope.go:117] "RemoveContainer" containerID="a7f7010ad95e2d17bef0ecc28ff99e10eb89004f25f84bb5361e70a0715c9e09"
Nov 25 10:45:18 crc kubenswrapper[4932]: I1125 10:45:18.205533 4932 scope.go:117] "RemoveContainer" containerID="1bda477b5ead6a6d0e524e1b3e6cb8295051fc3949cc34bb7345125e81fd1141"
Nov 25 10:45:18 crc kubenswrapper[4932]: I1125 10:45:18.252752 4932 scope.go:117] "RemoveContainer" containerID="69e9b29925388e2c1f8d386bd743052dff9fc2b449bb745ad81008eb92c54947"
Nov 25 10:45:18 crc kubenswrapper[4932]: I1125 10:45:18.301954 4932 scope.go:117] "RemoveContainer" containerID="ff85de16609fd3055a115b7345feabb30aa61191df647dc37ee363f61074516b"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.663006 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lm7hp"]
Nov 25 10:45:49 crc kubenswrapper[4932]: E1125 10:45:49.664148 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef03811f-1312-4771-b2bb-f493f5782caa" containerName="collect-profiles"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.664165 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef03811f-1312-4771-b2bb-f493f5782caa" containerName="collect-profiles"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.664526 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef03811f-1312-4771-b2bb-f493f5782caa" containerName="collect-profiles"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.666430 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.673685 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lm7hp"]
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.796239 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-utilities\") pod \"certified-operators-lm7hp\" (UID: \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\") " pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.796432 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-catalog-content\") pod \"certified-operators-lm7hp\" (UID: \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\") " pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.796504 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grdgg\" (UniqueName: \"kubernetes.io/projected/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-kube-api-access-grdgg\") pod \"certified-operators-lm7hp\" (UID: \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\") " pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.860121 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qfn6k"]
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.863796 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.873746 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qfn6k"]
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.898546 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-catalog-content\") pod \"certified-operators-lm7hp\" (UID: \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\") " pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.898649 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grdgg\" (UniqueName: \"kubernetes.io/projected/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-kube-api-access-grdgg\") pod \"certified-operators-lm7hp\" (UID: \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\") " pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.898719 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-utilities\") pod \"certified-operators-lm7hp\" (UID: \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\") " pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.899150 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-catalog-content\") pod \"certified-operators-lm7hp\" (UID: \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\") " pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.899227 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-utilities\") pod \"certified-operators-lm7hp\" (UID: \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\") " pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:45:49 crc kubenswrapper[4932]: I1125 10:45:49.918691 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grdgg\" (UniqueName: \"kubernetes.io/projected/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-kube-api-access-grdgg\") pod \"certified-operators-lm7hp\" (UID: \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\") " pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.000764 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-utilities\") pod \"community-operators-qfn6k\" (UID: \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\") " pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.000829 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-catalog-content\") pod \"community-operators-qfn6k\" (UID: \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\") " pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.000946 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qskr8\" (UniqueName: \"kubernetes.io/projected/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-kube-api-access-qskr8\") pod \"community-operators-qfn6k\" (UID: \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\") " pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.001369 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.102898 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-utilities\") pod \"community-operators-qfn6k\" (UID: \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\") " pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.103347 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-catalog-content\") pod \"community-operators-qfn6k\" (UID: \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\") " pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.103532 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qskr8\" (UniqueName: \"kubernetes.io/projected/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-kube-api-access-qskr8\") pod \"community-operators-qfn6k\" (UID: \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\") " pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.103607 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-utilities\") pod \"community-operators-qfn6k\" (UID: \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\") " pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.103932 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-catalog-content\") pod \"community-operators-qfn6k\" (UID: \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\") " pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.127918 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qskr8\" (UniqueName: \"kubernetes.io/projected/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-kube-api-access-qskr8\") pod \"community-operators-qfn6k\" (UID: \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\") " pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.191075 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.600833 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lm7hp"]
Nov 25 10:45:50 crc kubenswrapper[4932]: W1125 10:45:50.603538 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4b8ca3d1_7f9a_41f6_9203_6cc1081b78df.slice/crio-97e0ef81caa566b88679b5e324f27597eda1f0598aa4aafa2207bc29bd156f40 WatchSource:0}: Error finding container 97e0ef81caa566b88679b5e324f27597eda1f0598aa4aafa2207bc29bd156f40: Status 404 returned error can't find the container with id 97e0ef81caa566b88679b5e324f27597eda1f0598aa4aafa2207bc29bd156f40
Nov 25 10:45:50 crc kubenswrapper[4932]: I1125 10:45:50.787301 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qfn6k"]
Nov 25 10:45:50 crc kubenswrapper[4932]: W1125 10:45:50.794211 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3e9dbf3_af00_43a6_9d9f_db3ed493b38b.slice/crio-adcb60afb251fbefbbc185ab95237c401357e1c624728f99272410ec22da5ceb WatchSource:0}: Error finding container adcb60afb251fbefbbc185ab95237c401357e1c624728f99272410ec22da5ceb: Status 404 returned error can't find the container with id adcb60afb251fbefbbc185ab95237c401357e1c624728f99272410ec22da5ceb
Nov 25 10:45:51 crc kubenswrapper[4932]: I1125 10:45:51.122595 4932 generic.go:334] "Generic (PLEG): container finished" podID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" containerID="df893518197c1c3d0fbf45d069a4d38b812b2a41a917738ccfe8fad16d06a413" exitCode=0
Nov 25 10:45:51 crc kubenswrapper[4932]: I1125 10:45:51.122670 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm7hp" event={"ID":"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df","Type":"ContainerDied","Data":"df893518197c1c3d0fbf45d069a4d38b812b2a41a917738ccfe8fad16d06a413"}
Nov 25 10:45:51 crc kubenswrapper[4932]: I1125 10:45:51.122699 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm7hp" event={"ID":"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df","Type":"ContainerStarted","Data":"97e0ef81caa566b88679b5e324f27597eda1f0598aa4aafa2207bc29bd156f40"}
Nov 25 10:45:51 crc kubenswrapper[4932]: I1125 10:45:51.124229 4932 generic.go:334] "Generic (PLEG): container finished" podID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" containerID="364dbaec9efb0c5bd3c0588f6cc56aa3f536744f018dde38547ff1106dbf45de" exitCode=0
Nov 25 10:45:51 crc kubenswrapper[4932]: I1125 10:45:51.124262 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfn6k" event={"ID":"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b","Type":"ContainerDied","Data":"364dbaec9efb0c5bd3c0588f6cc56aa3f536744f018dde38547ff1106dbf45de"}
Nov 25 10:45:51 crc kubenswrapper[4932]: I1125 10:45:51.124296 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfn6k" event={"ID":"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b","Type":"ContainerStarted","Data":"adcb60afb251fbefbbc185ab95237c401357e1c624728f99272410ec22da5ceb"}
Nov 25 10:45:51 crc kubenswrapper[4932]: I1125 10:45:51.125079 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 10:45:52 crc kubenswrapper[4932]: I1125 10:45:52.136380 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm7hp" event={"ID":"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df","Type":"ContainerStarted","Data":"d224e153a1de8af6e5e727be3b20008a9e715a308b77c03c50fb948a142ef3d6"}
Nov 25 10:45:52 crc kubenswrapper[4932]: I1125 10:45:52.139716 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfn6k" event={"ID":"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b","Type":"ContainerStarted","Data":"b794537923143b1f5170be1cbdf33370a7fffa60f37e7a4c4d0ed4f9732179c7"}
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.266619 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mrprx"]
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.268871 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.276935 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrprx"]
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.379250 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75ca7085-0953-4f17-9458-623e842a1386-catalog-content\") pod \"redhat-marketplace-mrprx\" (UID: \"75ca7085-0953-4f17-9458-623e842a1386\") " pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.379651 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tljx\" (UniqueName: \"kubernetes.io/projected/75ca7085-0953-4f17-9458-623e842a1386-kube-api-access-9tljx\") pod \"redhat-marketplace-mrprx\" (UID: \"75ca7085-0953-4f17-9458-623e842a1386\") " pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.379771 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75ca7085-0953-4f17-9458-623e842a1386-utilities\") pod \"redhat-marketplace-mrprx\" (UID: \"75ca7085-0953-4f17-9458-623e842a1386\") " pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.482008 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tljx\" (UniqueName: \"kubernetes.io/projected/75ca7085-0953-4f17-9458-623e842a1386-kube-api-access-9tljx\") pod \"redhat-marketplace-mrprx\" (UID: \"75ca7085-0953-4f17-9458-623e842a1386\") " pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.482102 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75ca7085-0953-4f17-9458-623e842a1386-utilities\") pod \"redhat-marketplace-mrprx\" (UID: \"75ca7085-0953-4f17-9458-623e842a1386\") " pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.482443 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75ca7085-0953-4f17-9458-623e842a1386-catalog-content\") pod \"redhat-marketplace-mrprx\" (UID: \"75ca7085-0953-4f17-9458-623e842a1386\") " pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.482845 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75ca7085-0953-4f17-9458-623e842a1386-utilities\") pod \"redhat-marketplace-mrprx\" (UID: \"75ca7085-0953-4f17-9458-623e842a1386\") " pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.483086 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75ca7085-0953-4f17-9458-623e842a1386-catalog-content\") pod \"redhat-marketplace-mrprx\" (UID: \"75ca7085-0953-4f17-9458-623e842a1386\") " pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.513707 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tljx\" (UniqueName: \"kubernetes.io/projected/75ca7085-0953-4f17-9458-623e842a1386-kube-api-access-9tljx\") pod \"redhat-marketplace-mrprx\" (UID: \"75ca7085-0953-4f17-9458-623e842a1386\") " pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:45:53 crc kubenswrapper[4932]: I1125 10:45:53.606679 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:45:54 crc kubenswrapper[4932]: W1125 10:45:54.128609 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75ca7085_0953_4f17_9458_623e842a1386.slice/crio-156ce5c3262687c0432e167ac8f32b7228e91b78aec50d61341b78d7acd4b956 WatchSource:0}: Error finding container 156ce5c3262687c0432e167ac8f32b7228e91b78aec50d61341b78d7acd4b956: Status 404 returned error can't find the container with id 156ce5c3262687c0432e167ac8f32b7228e91b78aec50d61341b78d7acd4b956
Nov 25 10:45:54 crc kubenswrapper[4932]: I1125 10:45:54.129515 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrprx"]
Nov 25 10:45:54 crc kubenswrapper[4932]: I1125 10:45:54.162063 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrprx" event={"ID":"75ca7085-0953-4f17-9458-623e842a1386","Type":"ContainerStarted","Data":"156ce5c3262687c0432e167ac8f32b7228e91b78aec50d61341b78d7acd4b956"}
Nov 25 10:45:55 crc kubenswrapper[4932]: I1125 10:45:55.174616 4932 generic.go:334] "Generic (PLEG): container finished" podID="75ca7085-0953-4f17-9458-623e842a1386" containerID="0888b83531419d34c3116333c420a12498e686b0719afc47efebbdcd6712cdb3" exitCode=0
Nov 25 10:45:55 crc kubenswrapper[4932]: I1125 10:45:55.174670 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrprx" event={"ID":"75ca7085-0953-4f17-9458-623e842a1386","Type":"ContainerDied","Data":"0888b83531419d34c3116333c420a12498e686b0719afc47efebbdcd6712cdb3"}
Nov 25 10:45:56 crc kubenswrapper[4932]: I1125 10:45:56.186879 4932 generic.go:334] "Generic (PLEG): container finished" podID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" containerID="d224e153a1de8af6e5e727be3b20008a9e715a308b77c03c50fb948a142ef3d6" exitCode=0
Nov 25 10:45:56 crc kubenswrapper[4932]: I1125 10:45:56.186989 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm7hp" event={"ID":"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df","Type":"ContainerDied","Data":"d224e153a1de8af6e5e727be3b20008a9e715a308b77c03c50fb948a142ef3d6"}
Nov 25 10:45:56 crc kubenswrapper[4932]: I1125 10:45:56.190333 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrprx" event={"ID":"75ca7085-0953-4f17-9458-623e842a1386","Type":"ContainerStarted","Data":"f9b0044e8ddab83e48983cf859491f34caeba45c47e618abdf95f0270e9cc640"}
Nov 25 10:45:56 crc kubenswrapper[4932]: I1125 10:45:56.193541 4932 generic.go:334] "Generic (PLEG): container finished" podID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" containerID="b794537923143b1f5170be1cbdf33370a7fffa60f37e7a4c4d0ed4f9732179c7" exitCode=0
Nov 25 10:45:56 crc kubenswrapper[4932]: I1125 10:45:56.193590 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfn6k" event={"ID":"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b","Type":"ContainerDied","Data":"b794537923143b1f5170be1cbdf33370a7fffa60f37e7a4c4d0ed4f9732179c7"}
Nov 25 10:45:57 crc kubenswrapper[4932]: I1125 10:45:57.203812 4932 generic.go:334] "Generic (PLEG): container finished" podID="75ca7085-0953-4f17-9458-623e842a1386" containerID="f9b0044e8ddab83e48983cf859491f34caeba45c47e618abdf95f0270e9cc640" exitCode=0
Nov 25 10:45:57 crc kubenswrapper[4932]: I1125 10:45:57.203915 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrprx" event={"ID":"75ca7085-0953-4f17-9458-623e842a1386","Type":"ContainerDied","Data":"f9b0044e8ddab83e48983cf859491f34caeba45c47e618abdf95f0270e9cc640"}
Nov 25 10:45:57 crc kubenswrapper[4932]: I1125 10:45:57.206617 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm7hp" event={"ID":"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df","Type":"ContainerStarted","Data":"21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8"}
Nov 25 10:45:57 crc kubenswrapper[4932]: I1125 10:45:57.241165 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lm7hp" podStartSLOduration=2.6486138280000002 podStartE2EDuration="8.24114659s" podCreationTimestamp="2025-11-25 10:45:49 +0000 UTC" firstStartedPulling="2025-11-25 10:45:51.124824273 +0000 UTC m=+7011.250853836" lastFinishedPulling="2025-11-25 10:45:56.717357035 +0000 UTC m=+7016.843386598" observedRunningTime="2025-11-25 10:45:57.23692247 +0000 UTC m=+7017.362952033" watchObservedRunningTime="2025-11-25 10:45:57.24114659 +0000 UTC m=+7017.367176153"
Nov 25 10:45:58 crc kubenswrapper[4932]: I1125 10:45:58.219935 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrprx" event={"ID":"75ca7085-0953-4f17-9458-623e842a1386","Type":"ContainerStarted","Data":"7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51"}
Nov 25 10:45:58 crc kubenswrapper[4932]: I1125 10:45:58.228959 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfn6k" event={"ID":"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b","Type":"ContainerStarted","Data":"c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd"}
Nov 25 10:45:58 crc kubenswrapper[4932]: I1125 10:45:58.259668 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mrprx" podStartSLOduration=2.691520164 podStartE2EDuration="5.259645912s" podCreationTimestamp="2025-11-25 10:45:53 +0000 UTC" firstStartedPulling="2025-11-25 10:45:55.176822237 +0000 UTC m=+7015.302851800" lastFinishedPulling="2025-11-25 10:45:57.744947985 +0000 UTC m=+7017.870977548" observedRunningTime="2025-11-25 10:45:58.241255735 +0000 UTC m=+7018.367285318" watchObservedRunningTime="2025-11-25 10:45:58.259645912 +0000 UTC m=+7018.385675475"
Nov 25 10:45:58 crc kubenswrapper[4932]: I1125 10:45:58.270479 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qfn6k" podStartSLOduration=3.407526314 podStartE2EDuration="9.270463661s" podCreationTimestamp="2025-11-25 10:45:49 +0000 UTC" firstStartedPulling="2025-11-25 10:45:51.125620676 +0000 UTC m=+7011.251650239" lastFinishedPulling="2025-11-25 10:45:56.988558023 +0000 UTC m=+7017.114587586" observedRunningTime="2025-11-25 10:45:58.268591148 +0000 UTC m=+7018.394620711" watchObservedRunningTime="2025-11-25 10:45:58.270463661 +0000 UTC m=+7018.396493224"
Nov 25 10:46:00 crc kubenswrapper[4932]: I1125 10:46:00.001942 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:46:00 crc kubenswrapper[4932]: I1125 10:46:00.002335 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lm7hp"
Nov 25 10:46:00 crc kubenswrapper[4932]: I1125 10:46:00.191390 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:46:00 crc kubenswrapper[4932]: I1125 10:46:00.191457 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qfn6k"
Nov 25 10:46:01 crc kubenswrapper[4932]: I1125 10:46:01.055624 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-lm7hp" podUID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" containerName="registry-server" probeResult="failure" output=<
Nov 25 10:46:01 crc kubenswrapper[4932]: timeout: failed to connect service ":50051" within 1s
Nov 25 10:46:01 crc kubenswrapper[4932]: >
Nov 25 10:46:01 crc kubenswrapper[4932]: I1125 10:46:01.236681 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-qfn6k" podUID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" containerName="registry-server" probeResult="failure" output=<
Nov 25 10:46:01 crc kubenswrapper[4932]: timeout: failed to connect service ":50051" within 1s
Nov 25 10:46:01 crc kubenswrapper[4932]: >
Nov 25 10:46:03 crc kubenswrapper[4932]: I1125 10:46:03.607451 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:46:03 crc kubenswrapper[4932]: I1125 10:46:03.607757 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:46:03 crc kubenswrapper[4932]: I1125 10:46:03.662676 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:46:04 crc kubenswrapper[4932]: I1125 10:46:04.334374 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.181083 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.181776 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.249576 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrprx"]
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.249818 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mrprx" podUID="75ca7085-0953-4f17-9458-623e842a1386" containerName="registry-server" containerID="cri-o://7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51" gracePeriod=2
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.709863 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.802102 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75ca7085-0953-4f17-9458-623e842a1386-utilities\") pod \"75ca7085-0953-4f17-9458-623e842a1386\" (UID: \"75ca7085-0953-4f17-9458-623e842a1386\") "
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.802370 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tljx\" (UniqueName: \"kubernetes.io/projected/75ca7085-0953-4f17-9458-623e842a1386-kube-api-access-9tljx\") pod \"75ca7085-0953-4f17-9458-623e842a1386\" (UID: \"75ca7085-0953-4f17-9458-623e842a1386\") "
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.802421 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75ca7085-0953-4f17-9458-623e842a1386-catalog-content\") pod \"75ca7085-0953-4f17-9458-623e842a1386\" (UID: \"75ca7085-0953-4f17-9458-623e842a1386\") "
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.803718 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75ca7085-0953-4f17-9458-623e842a1386-utilities" (OuterVolumeSpecName: "utilities") pod "75ca7085-0953-4f17-9458-623e842a1386" (UID: "75ca7085-0953-4f17-9458-623e842a1386"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.808041 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75ca7085-0953-4f17-9458-623e842a1386-kube-api-access-9tljx" (OuterVolumeSpecName: "kube-api-access-9tljx") pod "75ca7085-0953-4f17-9458-623e842a1386" (UID: "75ca7085-0953-4f17-9458-623e842a1386"). InnerVolumeSpecName "kube-api-access-9tljx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.820713 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75ca7085-0953-4f17-9458-623e842a1386-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "75ca7085-0953-4f17-9458-623e842a1386" (UID: "75ca7085-0953-4f17-9458-623e842a1386"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.904953 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tljx\" (UniqueName: \"kubernetes.io/projected/75ca7085-0953-4f17-9458-623e842a1386-kube-api-access-9tljx\") on node \"crc\" DevicePath \"\""
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.904985 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75ca7085-0953-4f17-9458-623e842a1386-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 10:46:07 crc kubenswrapper[4932]: I1125 10:46:07.904994 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75ca7085-0953-4f17-9458-623e842a1386-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.322839 4932 generic.go:334] "Generic (PLEG): container finished" podID="75ca7085-0953-4f17-9458-623e842a1386" containerID="7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51" exitCode=0
Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.322874 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrprx" event={"ID":"75ca7085-0953-4f17-9458-623e842a1386","Type":"ContainerDied","Data":"7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51"}
Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.322921 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mrprx"
Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.323202 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrprx" event={"ID":"75ca7085-0953-4f17-9458-623e842a1386","Type":"ContainerDied","Data":"156ce5c3262687c0432e167ac8f32b7228e91b78aec50d61341b78d7acd4b956"}
Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.323216 4932 scope.go:117] "RemoveContainer" containerID="7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51"
Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.343377 4932 scope.go:117] "RemoveContainer" containerID="f9b0044e8ddab83e48983cf859491f34caeba45c47e618abdf95f0270e9cc640"
Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.360488 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrprx"]
Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.369261 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrprx"]
Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.370008 4932 scope.go:117] "RemoveContainer" containerID="0888b83531419d34c3116333c420a12498e686b0719afc47efebbdcd6712cdb3"
Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.414073 4932 scope.go:117] "RemoveContainer" containerID="7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51"
Nov 25 10:46:08 crc kubenswrapper[4932]: E1125 10:46:08.414521 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51\": container with ID starting with 7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51 not found: ID does not exist" containerID="7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51"
Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.414558 4932
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51"} err="failed to get container status \"7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51\": rpc error: code = NotFound desc = could not find container \"7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51\": container with ID starting with 7fc8c58ddf24e34cf15c8d11ff33bb0868f1e3c399000a3d4c2087a75a422a51 not found: ID does not exist" Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.414620 4932 scope.go:117] "RemoveContainer" containerID="f9b0044e8ddab83e48983cf859491f34caeba45c47e618abdf95f0270e9cc640" Nov 25 10:46:08 crc kubenswrapper[4932]: E1125 10:46:08.414876 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9b0044e8ddab83e48983cf859491f34caeba45c47e618abdf95f0270e9cc640\": container with ID starting with f9b0044e8ddab83e48983cf859491f34caeba45c47e618abdf95f0270e9cc640 not found: ID does not exist" containerID="f9b0044e8ddab83e48983cf859491f34caeba45c47e618abdf95f0270e9cc640" Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.414908 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9b0044e8ddab83e48983cf859491f34caeba45c47e618abdf95f0270e9cc640"} err="failed to get container status \"f9b0044e8ddab83e48983cf859491f34caeba45c47e618abdf95f0270e9cc640\": rpc error: code = NotFound desc = could not find container \"f9b0044e8ddab83e48983cf859491f34caeba45c47e618abdf95f0270e9cc640\": container with ID starting with f9b0044e8ddab83e48983cf859491f34caeba45c47e618abdf95f0270e9cc640 not found: ID does not exist" Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.414924 4932 scope.go:117] "RemoveContainer" containerID="0888b83531419d34c3116333c420a12498e686b0719afc47efebbdcd6712cdb3" Nov 25 10:46:08 crc kubenswrapper[4932]: E1125 10:46:08.415133 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0888b83531419d34c3116333c420a12498e686b0719afc47efebbdcd6712cdb3\": container with ID starting with 0888b83531419d34c3116333c420a12498e686b0719afc47efebbdcd6712cdb3 not found: ID does not exist" containerID="0888b83531419d34c3116333c420a12498e686b0719afc47efebbdcd6712cdb3" Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.415162 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0888b83531419d34c3116333c420a12498e686b0719afc47efebbdcd6712cdb3"} err="failed to get container status \"0888b83531419d34c3116333c420a12498e686b0719afc47efebbdcd6712cdb3\": rpc error: code = NotFound desc = could not find container \"0888b83531419d34c3116333c420a12498e686b0719afc47efebbdcd6712cdb3\": container with ID starting with 0888b83531419d34c3116333c420a12498e686b0719afc47efebbdcd6712cdb3 not found: ID does not exist" Nov 25 10:46:08 crc kubenswrapper[4932]: I1125 10:46:08.620097 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75ca7085-0953-4f17-9458-623e842a1386" path="/var/lib/kubelet/pods/75ca7085-0953-4f17-9458-623e842a1386/volumes" Nov 25 10:46:10 crc kubenswrapper[4932]: I1125 10:46:10.083657 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lm7hp" Nov 25 10:46:10 crc kubenswrapper[4932]: I1125 10:46:10.132159 4932 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lm7hp" Nov 25 10:46:10 crc kubenswrapper[4932]: I1125 10:46:10.239040 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qfn6k" Nov 25 10:46:10 crc kubenswrapper[4932]: I1125 10:46:10.288721 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qfn6k" Nov 25 10:46:14 crc kubenswrapper[4932]: I1125 10:46:14.850802 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lm7hp"] Nov 25 10:46:14 crc kubenswrapper[4932]: I1125 10:46:14.851902 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lm7hp" podUID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" containerName="registry-server" containerID="cri-o://21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8" gracePeriod=2 Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.361965 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lm7hp" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.392704 4932 generic.go:334] "Generic (PLEG): container finished" podID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" containerID="21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8" exitCode=0 Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.392747 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm7hp" event={"ID":"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df","Type":"ContainerDied","Data":"21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8"} Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.392767 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm7hp" event={"ID":"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df","Type":"ContainerDied","Data":"97e0ef81caa566b88679b5e324f27597eda1f0598aa4aafa2207bc29bd156f40"} Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.392784 4932 scope.go:117] "RemoveContainer" containerID="21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.392900 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lm7hp" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.420910 4932 scope.go:117] "RemoveContainer" containerID="d224e153a1de8af6e5e727be3b20008a9e715a308b77c03c50fb948a142ef3d6" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.446438 4932 scope.go:117] "RemoveContainer" containerID="df893518197c1c3d0fbf45d069a4d38b812b2a41a917738ccfe8fad16d06a413" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.480143 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grdgg\" (UniqueName: \"kubernetes.io/projected/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-kube-api-access-grdgg\") pod \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\" (UID: \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\") " Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.480586 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-utilities\") pod \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\" (UID: \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\") " Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.480737 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-catalog-content\") pod \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\" (UID: \"4b8ca3d1-7f9a-41f6-9203-6cc1081b78df\") " Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.482203 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-utilities" (OuterVolumeSpecName: "utilities") pod "4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" (UID: "4b8ca3d1-7f9a-41f6-9203-6cc1081b78df"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.488122 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-kube-api-access-grdgg" (OuterVolumeSpecName: "kube-api-access-grdgg") pod "4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" (UID: "4b8ca3d1-7f9a-41f6-9203-6cc1081b78df"). InnerVolumeSpecName "kube-api-access-grdgg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.494939 4932 scope.go:117] "RemoveContainer" containerID="21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8" Nov 25 10:46:15 crc kubenswrapper[4932]: E1125 10:46:15.495407 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8\": container with ID starting with 21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8 not found: ID does not exist" containerID="21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.495436 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8"} err="failed to get container status \"21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8\": rpc error: code = NotFound desc = could not find container \"21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8\": container with ID starting with 21d629f7d5fd720662d0c993f5765fe0a1a257dd740574777491ce9723c6a5c8 not found: ID does not exist" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.495456 4932 scope.go:117] "RemoveContainer" containerID="d224e153a1de8af6e5e727be3b20008a9e715a308b77c03c50fb948a142ef3d6" Nov 25 10:46:15 crc kubenswrapper[4932]: E1125 10:46:15.495753 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d224e153a1de8af6e5e727be3b20008a9e715a308b77c03c50fb948a142ef3d6\": container with ID starting with d224e153a1de8af6e5e727be3b20008a9e715a308b77c03c50fb948a142ef3d6 not found: ID does not exist" containerID="d224e153a1de8af6e5e727be3b20008a9e715a308b77c03c50fb948a142ef3d6" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.495828 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d224e153a1de8af6e5e727be3b20008a9e715a308b77c03c50fb948a142ef3d6"} err="failed to get container status \"d224e153a1de8af6e5e727be3b20008a9e715a308b77c03c50fb948a142ef3d6\": rpc error: code = NotFound desc = could not find container \"d224e153a1de8af6e5e727be3b20008a9e715a308b77c03c50fb948a142ef3d6\": container with ID starting with d224e153a1de8af6e5e727be3b20008a9e715a308b77c03c50fb948a142ef3d6 not found: ID does not exist" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.495869 4932 scope.go:117] "RemoveContainer" containerID="df893518197c1c3d0fbf45d069a4d38b812b2a41a917738ccfe8fad16d06a413" Nov 25 10:46:15 crc kubenswrapper[4932]: E1125 10:46:15.496215 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df893518197c1c3d0fbf45d069a4d38b812b2a41a917738ccfe8fad16d06a413\": container with ID starting with df893518197c1c3d0fbf45d069a4d38b812b2a41a917738ccfe8fad16d06a413 not found: ID does not exist" containerID="df893518197c1c3d0fbf45d069a4d38b812b2a41a917738ccfe8fad16d06a413" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.496241 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df893518197c1c3d0fbf45d069a4d38b812b2a41a917738ccfe8fad16d06a413"} err="failed to get container status \"df893518197c1c3d0fbf45d069a4d38b812b2a41a917738ccfe8fad16d06a413\": rpc error: code = NotFound desc = could not 
find container \"df893518197c1c3d0fbf45d069a4d38b812b2a41a917738ccfe8fad16d06a413\": container with ID starting with df893518197c1c3d0fbf45d069a4d38b812b2a41a917738ccfe8fad16d06a413 not found: ID does not exist" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.527768 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" (UID: "4b8ca3d1-7f9a-41f6-9203-6cc1081b78df"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.584426 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.584457 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grdgg\" (UniqueName: \"kubernetes.io/projected/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-kube-api-access-grdgg\") on node \"crc\" DevicePath \"\"" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.584467 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.728067 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lm7hp"] Nov 25 10:46:15 crc kubenswrapper[4932]: I1125 10:46:15.735594 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lm7hp"] Nov 25 10:46:16 crc kubenswrapper[4932]: I1125 10:46:16.619177 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" path="/var/lib/kubelet/pods/4b8ca3d1-7f9a-41f6-9203-6cc1081b78df/volumes" Nov 25 10:46:17 crc kubenswrapper[4932]: I1125 10:46:17.649165 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qfn6k"] Nov 25 10:46:17 crc kubenswrapper[4932]: I1125 10:46:17.650280 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qfn6k" podUID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" containerName="registry-server" containerID="cri-o://c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd" gracePeriod=2 Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.098794 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qfn6k" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.240717 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qskr8\" (UniqueName: \"kubernetes.io/projected/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-kube-api-access-qskr8\") pod \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\" (UID: \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\") " Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.240921 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-utilities\") pod \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\" (UID: \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\") " Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.241055 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-catalog-content\") pod \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\" (UID: \"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b\") " Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.242388 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-utilities" (OuterVolumeSpecName: "utilities") pod "b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" (UID: "b3e9dbf3-af00-43a6-9d9f-db3ed493b38b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.243039 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.247521 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-kube-api-access-qskr8" (OuterVolumeSpecName: "kube-api-access-qskr8") pod "b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" (UID: "b3e9dbf3-af00-43a6-9d9f-db3ed493b38b"). InnerVolumeSpecName "kube-api-access-qskr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.294911 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" (UID: "b3e9dbf3-af00-43a6-9d9f-db3ed493b38b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.345059 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.345103 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qskr8\" (UniqueName: \"kubernetes.io/projected/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b-kube-api-access-qskr8\") on node \"crc\" DevicePath \"\"" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.425132 4932 generic.go:334] "Generic (PLEG): container finished" podID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" containerID="c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd" exitCode=0 Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.425176 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfn6k" event={"ID":"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b","Type":"ContainerDied","Data":"c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd"} Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.425291 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qfn6k" event={"ID":"b3e9dbf3-af00-43a6-9d9f-db3ed493b38b","Type":"ContainerDied","Data":"adcb60afb251fbefbbc185ab95237c401357e1c624728f99272410ec22da5ceb"} Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.425315 4932 scope.go:117] "RemoveContainer" containerID="c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.425460 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qfn6k" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.451870 4932 scope.go:117] "RemoveContainer" containerID="b794537923143b1f5170be1cbdf33370a7fffa60f37e7a4c4d0ed4f9732179c7" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.461436 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qfn6k"] Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.472075 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qfn6k"] Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.485477 4932 scope.go:117] "RemoveContainer" containerID="364dbaec9efb0c5bd3c0588f6cc56aa3f536744f018dde38547ff1106dbf45de" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.533922 4932 scope.go:117] "RemoveContainer" containerID="c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd" Nov 25 10:46:18 crc kubenswrapper[4932]: E1125 10:46:18.534388 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd\": container with ID starting with c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd not found: ID does not exist" containerID="c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.534430 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd"} err="failed to get container status \"c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd\": rpc error: code = NotFound desc = could not find container \"c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd\": container with ID starting with c2f2c7774c289f67d6794c9efac2206d2c13f1570ecc42e9a17464a095d25bcd not found: ID does not exist" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.534457 4932 scope.go:117] "RemoveContainer" containerID="b794537923143b1f5170be1cbdf33370a7fffa60f37e7a4c4d0ed4f9732179c7" Nov 25 10:46:18 crc kubenswrapper[4932]: E1125 10:46:18.534790 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b794537923143b1f5170be1cbdf33370a7fffa60f37e7a4c4d0ed4f9732179c7\": container with ID starting with b794537923143b1f5170be1cbdf33370a7fffa60f37e7a4c4d0ed4f9732179c7 not found: ID does not exist" containerID="b794537923143b1f5170be1cbdf33370a7fffa60f37e7a4c4d0ed4f9732179c7" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.534833 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b794537923143b1f5170be1cbdf33370a7fffa60f37e7a4c4d0ed4f9732179c7"} err="failed to get container status \"b794537923143b1f5170be1cbdf33370a7fffa60f37e7a4c4d0ed4f9732179c7\": rpc error: code = NotFound desc = could not find container \"b794537923143b1f5170be1cbdf33370a7fffa60f37e7a4c4d0ed4f9732179c7\": container with ID starting with b794537923143b1f5170be1cbdf33370a7fffa60f37e7a4c4d0ed4f9732179c7 not found: ID does not exist" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.534870 4932 scope.go:117] "RemoveContainer" containerID="364dbaec9efb0c5bd3c0588f6cc56aa3f536744f018dde38547ff1106dbf45de" Nov 25 10:46:18 crc kubenswrapper[4932]: E1125 10:46:18.535138 4932 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"364dbaec9efb0c5bd3c0588f6cc56aa3f536744f018dde38547ff1106dbf45de\": container with ID starting with 364dbaec9efb0c5bd3c0588f6cc56aa3f536744f018dde38547ff1106dbf45de not found: ID does not exist" containerID="364dbaec9efb0c5bd3c0588f6cc56aa3f536744f018dde38547ff1106dbf45de" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.535165 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"364dbaec9efb0c5bd3c0588f6cc56aa3f536744f018dde38547ff1106dbf45de"} err="failed to get container status \"364dbaec9efb0c5bd3c0588f6cc56aa3f536744f018dde38547ff1106dbf45de\": rpc error: code = NotFound desc = could not find container \"364dbaec9efb0c5bd3c0588f6cc56aa3f536744f018dde38547ff1106dbf45de\": container with ID starting with 364dbaec9efb0c5bd3c0588f6cc56aa3f536744f018dde38547ff1106dbf45de not found: ID does not exist" Nov 25 10:46:18 crc kubenswrapper[4932]: I1125 10:46:18.621394 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" path="/var/lib/kubelet/pods/b3e9dbf3-af00-43a6-9d9f-db3ed493b38b/volumes" Nov 25 10:46:37 crc kubenswrapper[4932]: I1125 10:46:37.181088 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:46:37 crc kubenswrapper[4932]: I1125 10:46:37.181684 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:47:07 crc kubenswrapper[4932]: I1125 10:47:07.181353 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:47:07 crc kubenswrapper[4932]: I1125 10:47:07.181848 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:47:07 crc kubenswrapper[4932]: I1125 10:47:07.181890 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 10:47:07 crc kubenswrapper[4932]: I1125 10:47:07.182833 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:47:07 crc kubenswrapper[4932]: I1125 10:47:07.182898 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" 
podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" gracePeriod=600 Nov 25 10:47:07 crc kubenswrapper[4932]: E1125 10:47:07.305713 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:47:08 crc kubenswrapper[4932]: I1125 10:47:08.035897 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" exitCode=0 Nov 25 10:47:08 crc kubenswrapper[4932]: I1125 10:47:08.035959 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66"} Nov 25 10:47:08 crc kubenswrapper[4932]: I1125 10:47:08.036306 4932 scope.go:117] "RemoveContainer" containerID="f89f493934976ffb7be7739a40343cc5d02766d169103db7c02e69f490bc4c4c" Nov 25 10:47:08 crc kubenswrapper[4932]: I1125 10:47:08.037159 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:47:08 crc kubenswrapper[4932]: E1125 10:47:08.037506 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:47:18 crc kubenswrapper[4932]: I1125 10:47:18.607105 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:47:18 crc kubenswrapper[4932]: E1125 10:47:18.608011 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:47:29 crc kubenswrapper[4932]: I1125 10:47:29.606872 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:47:29 crc kubenswrapper[4932]: E1125 10:47:29.607696 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:47:35 crc kubenswrapper[4932]: I1125 10:47:35.044428 4932 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-zh8rp"] Nov 25 10:47:35 crc kubenswrapper[4932]: I1125 10:47:35.055458 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-8008-account-create-x6zwq"] Nov 25 10:47:35 crc kubenswrapper[4932]: I1125 10:47:35.063698 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-8008-account-create-x6zwq"] Nov 25 10:47:35 crc kubenswrapper[4932]: I1125 10:47:35.071056 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-zh8rp"] Nov 25 10:47:36 crc kubenswrapper[4932]: I1125 10:47:36.623067 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="229934ff-1f5d-4203-a3e8-93c0cc404320" path="/var/lib/kubelet/pods/229934ff-1f5d-4203-a3e8-93c0cc404320/volumes" Nov 25 10:47:36 crc kubenswrapper[4932]: I1125 10:47:36.624602 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="428eabdc-4f44-4b96-b4d0-3ab48106d1ed" path="/var/lib/kubelet/pods/428eabdc-4f44-4b96-b4d0-3ab48106d1ed/volumes" Nov 25 10:47:44 crc kubenswrapper[4932]: I1125 10:47:44.606571 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:47:44 crc kubenswrapper[4932]: E1125 10:47:44.607520 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:47:49 crc kubenswrapper[4932]: I1125 10:47:49.027208 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-jcfxx"] Nov 25 10:47:49 crc kubenswrapper[4932]: I1125 10:47:49.069719 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-jcfxx"] Nov 25 10:47:50 crc kubenswrapper[4932]: I1125 10:47:50.619505 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ad43add-2f06-42b9-9802-9eb44383b894" path="/var/lib/kubelet/pods/4ad43add-2f06-42b9-9802-9eb44383b894/volumes" Nov 25 10:47:56 crc kubenswrapper[4932]: I1125 10:47:56.607091 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:47:56 crc kubenswrapper[4932]: E1125 10:47:56.608576 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:48:08 crc kubenswrapper[4932]: I1125 10:48:08.606270 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:48:08 crc kubenswrapper[4932]: E1125 10:48:08.607695 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:48:18 crc kubenswrapper[4932]: I1125 10:48:18.542310 4932 scope.go:117] "RemoveContainer" containerID="f435db27f64f5df74736326a7391f4e1cc51d59432c812b74ec11f1022b98e14" Nov 25 10:48:18 crc kubenswrapper[4932]: I1125 10:48:18.572466 4932 scope.go:117] "RemoveContainer" containerID="796d85dd018fed20032b03e569aa651a6d8471f86844e249e7c4510321a8af65" Nov 25 10:48:18 crc kubenswrapper[4932]: I1125 10:48:18.629924 4932 scope.go:117] "RemoveContainer" containerID="d48749efb5ed5e823ceab5e992d8b2a2f2deb791ad6379dbedf9b6c8afc76988" Nov 25 10:48:23 crc kubenswrapper[4932]: I1125 10:48:23.606793 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:48:23 crc kubenswrapper[4932]: E1125 10:48:23.607696 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:48:36 crc kubenswrapper[4932]: I1125 10:48:36.607370 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:48:36 crc kubenswrapper[4932]: E1125 10:48:36.608052 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:48:48 crc kubenswrapper[4932]: I1125 10:48:48.606225 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:48:48 crc kubenswrapper[4932]: E1125 10:48:48.606960 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:49:02 crc kubenswrapper[4932]: I1125 10:49:02.605898 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:49:02 crc kubenswrapper[4932]: E1125 10:49:02.606746 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:49:17 crc kubenswrapper[4932]: I1125 10:49:17.605893 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:49:17 crc 
kubenswrapper[4932]: E1125 10:49:17.606964 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:49:29 crc kubenswrapper[4932]: I1125 10:49:29.607171 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:49:29 crc kubenswrapper[4932]: E1125 10:49:29.608137 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:49:44 crc kubenswrapper[4932]: I1125 10:49:44.607314 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:49:44 crc kubenswrapper[4932]: E1125 10:49:44.608444 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:49:59 crc kubenswrapper[4932]: I1125 10:49:59.606120 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:49:59 crc kubenswrapper[4932]: E1125 10:49:59.607025 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:50:14 crc kubenswrapper[4932]: I1125 10:50:14.607174 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:50:14 crc kubenswrapper[4932]: E1125 10:50:14.610670 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:50:25 crc kubenswrapper[4932]: I1125 10:50:25.606950 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:50:25 crc kubenswrapper[4932]: E1125 10:50:25.608172 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.462559 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-94cwh"] Nov 25 10:50:30 crc kubenswrapper[4932]: E1125 10:50:30.463621 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" containerName="extract-utilities" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.463640 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" containerName="extract-utilities" Nov 25 10:50:30 crc kubenswrapper[4932]: E1125 10:50:30.463658 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" containerName="registry-server" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.463665 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" containerName="registry-server" Nov 25 10:50:30 crc kubenswrapper[4932]: E1125 10:50:30.463681 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" containerName="extract-content" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.463689 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" containerName="extract-content" Nov 25 10:50:30 crc kubenswrapper[4932]: E1125 10:50:30.463720 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" containerName="registry-server" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.463728 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" containerName="registry-server" Nov 25 10:50:30 crc kubenswrapper[4932]: E1125 10:50:30.463743 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75ca7085-0953-4f17-9458-623e842a1386" containerName="extract-utilities" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.463750 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="75ca7085-0953-4f17-9458-623e842a1386" containerName="extract-utilities" Nov 25 10:50:30 crc kubenswrapper[4932]: E1125 10:50:30.463762 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" containerName="extract-content" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.463768 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" containerName="extract-content" Nov 25 10:50:30 crc kubenswrapper[4932]: E1125 10:50:30.463783 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" containerName="extract-utilities" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.463790 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" containerName="extract-utilities" Nov 25 10:50:30 crc kubenswrapper[4932]: E1125 10:50:30.463806 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75ca7085-0953-4f17-9458-623e842a1386" containerName="registry-server" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.463813 4932 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="75ca7085-0953-4f17-9458-623e842a1386" containerName="registry-server" Nov 25 10:50:30 crc kubenswrapper[4932]: E1125 10:50:30.463830 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75ca7085-0953-4f17-9458-623e842a1386" containerName="extract-content" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.463837 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="75ca7085-0953-4f17-9458-623e842a1386" containerName="extract-content" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.464053 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="75ca7085-0953-4f17-9458-623e842a1386" containerName="registry-server" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.464067 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3e9dbf3-af00-43a6-9d9f-db3ed493b38b" containerName="registry-server" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.464094 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b8ca3d1-7f9a-41f6-9203-6cc1081b78df" containerName="registry-server" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.466072 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.479932 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-94cwh"] Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.572509 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6789b75-af75-47b3-b807-94d019acf4bd-catalog-content\") pod \"redhat-operators-94cwh\" (UID: \"a6789b75-af75-47b3-b807-94d019acf4bd\") " pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.572574 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p46hd\" (UniqueName: \"kubernetes.io/projected/a6789b75-af75-47b3-b807-94d019acf4bd-kube-api-access-p46hd\") pod \"redhat-operators-94cwh\" (UID: \"a6789b75-af75-47b3-b807-94d019acf4bd\") " pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.572633 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6789b75-af75-47b3-b807-94d019acf4bd-utilities\") pod \"redhat-operators-94cwh\" (UID: \"a6789b75-af75-47b3-b807-94d019acf4bd\") " pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.675069 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6789b75-af75-47b3-b807-94d019acf4bd-catalog-content\") pod \"redhat-operators-94cwh\" (UID: \"a6789b75-af75-47b3-b807-94d019acf4bd\") " pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.675139 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p46hd\" (UniqueName: \"kubernetes.io/projected/a6789b75-af75-47b3-b807-94d019acf4bd-kube-api-access-p46hd\") pod \"redhat-operators-94cwh\" (UID: \"a6789b75-af75-47b3-b807-94d019acf4bd\") " pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.675224 4932 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6789b75-af75-47b3-b807-94d019acf4bd-utilities\") pod \"redhat-operators-94cwh\" (UID: \"a6789b75-af75-47b3-b807-94d019acf4bd\") " pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.675640 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6789b75-af75-47b3-b807-94d019acf4bd-catalog-content\") pod \"redhat-operators-94cwh\" (UID: \"a6789b75-af75-47b3-b807-94d019acf4bd\") " pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.676421 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6789b75-af75-47b3-b807-94d019acf4bd-utilities\") pod \"redhat-operators-94cwh\" (UID: \"a6789b75-af75-47b3-b807-94d019acf4bd\") " pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.703504 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p46hd\" (UniqueName: \"kubernetes.io/projected/a6789b75-af75-47b3-b807-94d019acf4bd-kube-api-access-p46hd\") pod \"redhat-operators-94cwh\" (UID: \"a6789b75-af75-47b3-b807-94d019acf4bd\") " pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:30 crc kubenswrapper[4932]: I1125 10:50:30.792890 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:31 crc kubenswrapper[4932]: I1125 10:50:31.338757 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-94cwh"] Nov 25 10:50:31 crc kubenswrapper[4932]: I1125 10:50:31.935937 4932 generic.go:334] "Generic (PLEG): container finished" podID="a6789b75-af75-47b3-b807-94d019acf4bd" containerID="6e5665253e6c4d082319467bcc5f2f070141038f2207d7f4f4eff50f6e85b900" exitCode=0 Nov 25 10:50:31 crc kubenswrapper[4932]: I1125 10:50:31.936081 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-94cwh" event={"ID":"a6789b75-af75-47b3-b807-94d019acf4bd","Type":"ContainerDied","Data":"6e5665253e6c4d082319467bcc5f2f070141038f2207d7f4f4eff50f6e85b900"} Nov 25 10:50:31 crc kubenswrapper[4932]: I1125 10:50:31.936356 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-94cwh" event={"ID":"a6789b75-af75-47b3-b807-94d019acf4bd","Type":"ContainerStarted","Data":"6c466f8d7c9b29d5f441bd3d3ddfa1be49ebad5cd7cf73789dd0b6ea31e7410d"} Nov 25 10:50:32 crc kubenswrapper[4932]: I1125 10:50:32.947403 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-94cwh" event={"ID":"a6789b75-af75-47b3-b807-94d019acf4bd","Type":"ContainerStarted","Data":"490ac28c4c045d45c3873dde3d5bd1dd5be8dedcabf5c7b99489129e34f79be9"} Nov 25 10:50:37 crc kubenswrapper[4932]: I1125 10:50:37.993537 4932 generic.go:334] "Generic (PLEG): container finished" podID="a6789b75-af75-47b3-b807-94d019acf4bd" containerID="490ac28c4c045d45c3873dde3d5bd1dd5be8dedcabf5c7b99489129e34f79be9" exitCode=0 Nov 25 10:50:37 crc kubenswrapper[4932]: I1125 10:50:37.993615 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-94cwh" 
event={"ID":"a6789b75-af75-47b3-b807-94d019acf4bd","Type":"ContainerDied","Data":"490ac28c4c045d45c3873dde3d5bd1dd5be8dedcabf5c7b99489129e34f79be9"} Nov 25 10:50:39 crc kubenswrapper[4932]: I1125 10:50:39.005315 4932 generic.go:334] "Generic (PLEG): container finished" podID="87b34be8-c901-488e-b049-a745b41c53c7" containerID="cfaddc31a5ce3ba0e1a6d565fd15df7b3658b2cf8286daba1063f787c684ba73" exitCode=0 Nov 25 10:50:39 crc kubenswrapper[4932]: I1125 10:50:39.005414 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" event={"ID":"87b34be8-c901-488e-b049-a745b41c53c7","Type":"ContainerDied","Data":"cfaddc31a5ce3ba0e1a6d565fd15df7b3658b2cf8286daba1063f787c684ba73"} Nov 25 10:50:39 crc kubenswrapper[4932]: I1125 10:50:39.008709 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-94cwh" event={"ID":"a6789b75-af75-47b3-b807-94d019acf4bd","Type":"ContainerStarted","Data":"d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f"} Nov 25 10:50:39 crc kubenswrapper[4932]: I1125 10:50:39.076777 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-94cwh" podStartSLOduration=2.521140131 podStartE2EDuration="9.076755853s" podCreationTimestamp="2025-11-25 10:50:30 +0000 UTC" firstStartedPulling="2025-11-25 10:50:31.939809624 +0000 UTC m=+7292.065839187" lastFinishedPulling="2025-11-25 10:50:38.495425346 +0000 UTC m=+7298.621454909" observedRunningTime="2025-11-25 10:50:39.070361249 +0000 UTC m=+7299.196390812" watchObservedRunningTime="2025-11-25 10:50:39.076755853 +0000 UTC m=+7299.202785406" Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.446909 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.524712 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-ssh-key\") pod \"87b34be8-c901-488e-b049-a745b41c53c7\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.524811 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6h7t8\" (UniqueName: \"kubernetes.io/projected/87b34be8-c901-488e-b049-a745b41c53c7-kube-api-access-6h7t8\") pod \"87b34be8-c901-488e-b049-a745b41c53c7\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.524884 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-tripleo-cleanup-combined-ca-bundle\") pod \"87b34be8-c901-488e-b049-a745b41c53c7\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.524906 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-inventory\") pod \"87b34be8-c901-488e-b049-a745b41c53c7\" (UID: \"87b34be8-c901-488e-b049-a745b41c53c7\") " Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.530654 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87b34be8-c901-488e-b049-a745b41c53c7-kube-api-access-6h7t8" (OuterVolumeSpecName: "kube-api-access-6h7t8") pod "87b34be8-c901-488e-b049-a745b41c53c7" (UID: "87b34be8-c901-488e-b049-a745b41c53c7"). InnerVolumeSpecName "kube-api-access-6h7t8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.530771 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-tripleo-cleanup-combined-ca-bundle" (OuterVolumeSpecName: "tripleo-cleanup-combined-ca-bundle") pod "87b34be8-c901-488e-b049-a745b41c53c7" (UID: "87b34be8-c901-488e-b049-a745b41c53c7"). InnerVolumeSpecName "tripleo-cleanup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.553830 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-inventory" (OuterVolumeSpecName: "inventory") pod "87b34be8-c901-488e-b049-a745b41c53c7" (UID: "87b34be8-c901-488e-b049-a745b41c53c7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.554164 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "87b34be8-c901-488e-b049-a745b41c53c7" (UID: "87b34be8-c901-488e-b049-a745b41c53c7"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.614246 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:50:40 crc kubenswrapper[4932]: E1125 10:50:40.614528 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.627276 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.627304 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6h7t8\" (UniqueName: \"kubernetes.io/projected/87b34be8-c901-488e-b049-a745b41c53c7-kube-api-access-6h7t8\") on node \"crc\" DevicePath \"\"" Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.627316 4932 reconciler_common.go:293] "Volume detached for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-tripleo-cleanup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.627324 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/87b34be8-c901-488e-b049-a745b41c53c7-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.793816 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:40 crc kubenswrapper[4932]: I1125 10:50:40.793863 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:41 crc kubenswrapper[4932]: I1125 10:50:41.027680 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" event={"ID":"87b34be8-c901-488e-b049-a745b41c53c7","Type":"ContainerDied","Data":"97d3332844f14ea6305fb61c16e8846044a3f6529e5fa3f9f1c29dcfd8caf281"} Nov 25 10:50:41 crc kubenswrapper[4932]: I1125 10:50:41.027719 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97d3332844f14ea6305fb61c16e8846044a3f6529e5fa3f9f1c29dcfd8caf281" Nov 25 10:50:41 crc kubenswrapper[4932]: I1125 10:50:41.027738 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-6fwd9" Nov 25 10:50:41 crc kubenswrapper[4932]: I1125 10:50:41.838486 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-94cwh" podUID="a6789b75-af75-47b3-b807-94d019acf4bd" containerName="registry-server" probeResult="failure" output=< Nov 25 10:50:41 crc kubenswrapper[4932]: timeout: failed to connect service ":50051" within 1s Nov 25 10:50:41 crc kubenswrapper[4932]: > Nov 25 10:50:50 crc kubenswrapper[4932]: I1125 10:50:50.839741 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:50 crc kubenswrapper[4932]: I1125 10:50:50.894049 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:51 crc kubenswrapper[4932]: I1125 10:50:51.076785 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-94cwh"] Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.122176 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-94cwh" podUID="a6789b75-af75-47b3-b807-94d019acf4bd" containerName="registry-server" containerID="cri-o://d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f" gracePeriod=2 Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.442723 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-bxp65"] Nov 25 10:50:52 crc kubenswrapper[4932]: E1125 10:50:52.443621 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87b34be8-c901-488e-b049-a745b41c53c7" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.443646 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="87b34be8-c901-488e-b049-a745b41c53c7" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.443905 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="87b34be8-c901-488e-b049-a745b41c53c7" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.444873 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.447939 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.448608 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.448849 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.451250 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.487503 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-bxp65"] Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.491174 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-bxp65\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.491231 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-inventory\") pod \"bootstrap-openstack-openstack-cell1-bxp65\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.491511 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-bxp65\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.491740 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgjtm\" (UniqueName: \"kubernetes.io/projected/8421053d-e806-49df-b351-59d68382c398-kube-api-access-cgjtm\") pod \"bootstrap-openstack-openstack-cell1-bxp65\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.592746 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-bxp65\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.592791 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-inventory\") pod \"bootstrap-openstack-openstack-cell1-bxp65\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 
10:50:52.592859 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-bxp65\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.592945 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgjtm\" (UniqueName: \"kubernetes.io/projected/8421053d-e806-49df-b351-59d68382c398-kube-api-access-cgjtm\") pod \"bootstrap-openstack-openstack-cell1-bxp65\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.599212 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-bxp65\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.599336 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-inventory\") pod \"bootstrap-openstack-openstack-cell1-bxp65\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.602120 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-bxp65\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.608324 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgjtm\" (UniqueName: \"kubernetes.io/projected/8421053d-e806-49df-b351-59d68382c398-kube-api-access-cgjtm\") pod \"bootstrap-openstack-openstack-cell1-bxp65\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.685976 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.774528 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.796555 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6789b75-af75-47b3-b807-94d019acf4bd-catalog-content\") pod \"a6789b75-af75-47b3-b807-94d019acf4bd\" (UID: \"a6789b75-af75-47b3-b807-94d019acf4bd\") " Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.796680 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6789b75-af75-47b3-b807-94d019acf4bd-utilities\") pod \"a6789b75-af75-47b3-b807-94d019acf4bd\" (UID: \"a6789b75-af75-47b3-b807-94d019acf4bd\") " Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.796820 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p46hd\" (UniqueName: \"kubernetes.io/projected/a6789b75-af75-47b3-b807-94d019acf4bd-kube-api-access-p46hd\") pod \"a6789b75-af75-47b3-b807-94d019acf4bd\" (UID: \"a6789b75-af75-47b3-b807-94d019acf4bd\") " Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.797760 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6789b75-af75-47b3-b807-94d019acf4bd-utilities" (OuterVolumeSpecName: "utilities") pod "a6789b75-af75-47b3-b807-94d019acf4bd" (UID: "a6789b75-af75-47b3-b807-94d019acf4bd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.801577 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6789b75-af75-47b3-b807-94d019acf4bd-kube-api-access-p46hd" (OuterVolumeSpecName: "kube-api-access-p46hd") pod "a6789b75-af75-47b3-b807-94d019acf4bd" (UID: "a6789b75-af75-47b3-b807-94d019acf4bd"). InnerVolumeSpecName "kube-api-access-p46hd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.896708 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a6789b75-af75-47b3-b807-94d019acf4bd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a6789b75-af75-47b3-b807-94d019acf4bd" (UID: "a6789b75-af75-47b3-b807-94d019acf4bd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.899826 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a6789b75-af75-47b3-b807-94d019acf4bd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.899855 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a6789b75-af75-47b3-b807-94d019acf4bd-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:50:52 crc kubenswrapper[4932]: I1125 10:50:52.899868 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p46hd\" (UniqueName: \"kubernetes.io/projected/a6789b75-af75-47b3-b807-94d019acf4bd-kube-api-access-p46hd\") on node \"crc\" DevicePath \"\"" Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.147113 4932 generic.go:334] "Generic (PLEG): container finished" podID="a6789b75-af75-47b3-b807-94d019acf4bd" containerID="d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f" exitCode=0 Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.147155 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-94cwh" event={"ID":"a6789b75-af75-47b3-b807-94d019acf4bd","Type":"ContainerDied","Data":"d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f"} Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.147178 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-94cwh" Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.147223 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-94cwh" event={"ID":"a6789b75-af75-47b3-b807-94d019acf4bd","Type":"ContainerDied","Data":"6c466f8d7c9b29d5f441bd3d3ddfa1be49ebad5cd7cf73789dd0b6ea31e7410d"} Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.147253 4932 scope.go:117] "RemoveContainer" containerID="d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f" Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.184732 4932 scope.go:117] "RemoveContainer" containerID="490ac28c4c045d45c3873dde3d5bd1dd5be8dedcabf5c7b99489129e34f79be9" Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.198912 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-94cwh"] Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.211570 4932 scope.go:117] "RemoveContainer" containerID="6e5665253e6c4d082319467bcc5f2f070141038f2207d7f4f4eff50f6e85b900" Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.224264 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-94cwh"] Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.236728 4932 scope.go:117] "RemoveContainer" containerID="d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f" Nov 25 10:50:53 crc kubenswrapper[4932]: E1125 10:50:53.241376 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f\": container with ID starting with d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f not found: ID does not exist" containerID="d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f" Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.241423 4932 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f"} err="failed to get container status \"d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f\": rpc error: code = NotFound desc = could not find container \"d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f\": container with ID starting with d3abade605ac0a5b5376c4f16d682c70b2872dfaf99bab4e277e0029953a3e7f not found: ID does not exist" Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.241454 4932 scope.go:117] "RemoveContainer" containerID="490ac28c4c045d45c3873dde3d5bd1dd5be8dedcabf5c7b99489129e34f79be9" Nov 25 10:50:53 crc kubenswrapper[4932]: E1125 10:50:53.242591 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"490ac28c4c045d45c3873dde3d5bd1dd5be8dedcabf5c7b99489129e34f79be9\": container with ID starting with 490ac28c4c045d45c3873dde3d5bd1dd5be8dedcabf5c7b99489129e34f79be9 not found: ID does not exist" containerID="490ac28c4c045d45c3873dde3d5bd1dd5be8dedcabf5c7b99489129e34f79be9" Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.242618 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"490ac28c4c045d45c3873dde3d5bd1dd5be8dedcabf5c7b99489129e34f79be9"} err="failed to get container status \"490ac28c4c045d45c3873dde3d5bd1dd5be8dedcabf5c7b99489129e34f79be9\": rpc error: code = NotFound desc = could not find container \"490ac28c4c045d45c3873dde3d5bd1dd5be8dedcabf5c7b99489129e34f79be9\": container with ID starting with 490ac28c4c045d45c3873dde3d5bd1dd5be8dedcabf5c7b99489129e34f79be9 not found: ID does not exist" Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.242637 4932 scope.go:117] "RemoveContainer" containerID="6e5665253e6c4d082319467bcc5f2f070141038f2207d7f4f4eff50f6e85b900" Nov 25 10:50:53 crc kubenswrapper[4932]: E1125 10:50:53.242870 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e5665253e6c4d082319467bcc5f2f070141038f2207d7f4f4eff50f6e85b900\": container with ID starting with 6e5665253e6c4d082319467bcc5f2f070141038f2207d7f4f4eff50f6e85b900 not found: ID does not exist" containerID="6e5665253e6c4d082319467bcc5f2f070141038f2207d7f4f4eff50f6e85b900" Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.242892 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e5665253e6c4d082319467bcc5f2f070141038f2207d7f4f4eff50f6e85b900"} err="failed to get container status \"6e5665253e6c4d082319467bcc5f2f070141038f2207d7f4f4eff50f6e85b900\": rpc error: code = NotFound desc = could not find container \"6e5665253e6c4d082319467bcc5f2f070141038f2207d7f4f4eff50f6e85b900\": container with ID starting with 6e5665253e6c4d082319467bcc5f2f070141038f2207d7f4f4eff50f6e85b900 not found: ID does not exist" Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.331273 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-bxp65"] Nov 25 10:50:53 crc kubenswrapper[4932]: W1125 10:50:53.334325 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8421053d_e806_49df_b351_59d68382c398.slice/crio-1248b39032800631edd790c298f6560380a4963f7edc12f6c16644924f9e5f28 WatchSource:0}: Error finding container 
1248b39032800631edd790c298f6560380a4963f7edc12f6c16644924f9e5f28: Status 404 returned error can't find the container with id 1248b39032800631edd790c298f6560380a4963f7edc12f6c16644924f9e5f28 Nov 25 10:50:53 crc kubenswrapper[4932]: I1125 10:50:53.336932 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:50:54 crc kubenswrapper[4932]: I1125 10:50:54.163221 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" event={"ID":"8421053d-e806-49df-b351-59d68382c398","Type":"ContainerStarted","Data":"1248b39032800631edd790c298f6560380a4963f7edc12f6c16644924f9e5f28"} Nov 25 10:50:54 crc kubenswrapper[4932]: I1125 10:50:54.607740 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:50:54 crc kubenswrapper[4932]: E1125 10:50:54.608019 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:50:54 crc kubenswrapper[4932]: I1125 10:50:54.621872 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6789b75-af75-47b3-b807-94d019acf4bd" path="/var/lib/kubelet/pods/a6789b75-af75-47b3-b807-94d019acf4bd/volumes" Nov 25 10:50:55 crc kubenswrapper[4932]: I1125 10:50:55.186699 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" event={"ID":"8421053d-e806-49df-b351-59d68382c398","Type":"ContainerStarted","Data":"686d8139deffd2e8ae8816f0b1e9a6be3787742b8b22d8bc6e44ac45d265ed1b"} Nov 25 10:50:55 crc kubenswrapper[4932]: I1125 10:50:55.201742 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" podStartSLOduration=2.562441993 podStartE2EDuration="3.201728852s" podCreationTimestamp="2025-11-25 10:50:52 +0000 UTC" firstStartedPulling="2025-11-25 10:50:53.33667526 +0000 UTC m=+7313.462704833" lastFinishedPulling="2025-11-25 10:50:53.975962129 +0000 UTC m=+7314.101991692" observedRunningTime="2025-11-25 10:50:55.200690682 +0000 UTC m=+7315.326720245" watchObservedRunningTime="2025-11-25 10:50:55.201728852 +0000 UTC m=+7315.327758415" Nov 25 10:51:05 crc kubenswrapper[4932]: I1125 10:51:05.606221 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:51:05 crc kubenswrapper[4932]: E1125 10:51:05.607230 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:51:19 crc kubenswrapper[4932]: I1125 10:51:19.606496 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:51:19 crc kubenswrapper[4932]: E1125 10:51:19.607529 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:51:34 crc kubenswrapper[4932]: I1125 10:51:34.606418 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:51:34 crc kubenswrapper[4932]: E1125 10:51:34.607105 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:51:48 crc kubenswrapper[4932]: I1125 10:51:48.607244 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:51:48 crc kubenswrapper[4932]: E1125 10:51:48.608468 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:51:59 crc kubenswrapper[4932]: I1125 10:51:59.607467 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:51:59 crc kubenswrapper[4932]: E1125 10:51:59.609052 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:52:10 crc kubenswrapper[4932]: I1125 10:52:10.616095 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:52:10 crc kubenswrapper[4932]: I1125 10:52:10.947509 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"f25878df9a1b0709ed13ec05916688940d18560b65912b6aa892c7e65177215a"} Nov 25 10:53:54 crc kubenswrapper[4932]: I1125 10:53:54.017098 4932 generic.go:334] "Generic (PLEG): container finished" podID="8421053d-e806-49df-b351-59d68382c398" containerID="686d8139deffd2e8ae8816f0b1e9a6be3787742b8b22d8bc6e44ac45d265ed1b" exitCode=0 Nov 25 10:53:54 crc kubenswrapper[4932]: I1125 10:53:54.017376 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" event={"ID":"8421053d-e806-49df-b351-59d68382c398","Type":"ContainerDied","Data":"686d8139deffd2e8ae8816f0b1e9a6be3787742b8b22d8bc6e44ac45d265ed1b"} Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.462810 4932 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.541883 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-inventory\") pod \"8421053d-e806-49df-b351-59d68382c398\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.541998 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgjtm\" (UniqueName: \"kubernetes.io/projected/8421053d-e806-49df-b351-59d68382c398-kube-api-access-cgjtm\") pod \"8421053d-e806-49df-b351-59d68382c398\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.542057 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-ssh-key\") pod \"8421053d-e806-49df-b351-59d68382c398\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.542156 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-bootstrap-combined-ca-bundle\") pod \"8421053d-e806-49df-b351-59d68382c398\" (UID: \"8421053d-e806-49df-b351-59d68382c398\") " Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.548475 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "8421053d-e806-49df-b351-59d68382c398" (UID: "8421053d-e806-49df-b351-59d68382c398"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.548779 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8421053d-e806-49df-b351-59d68382c398-kube-api-access-cgjtm" (OuterVolumeSpecName: "kube-api-access-cgjtm") pod "8421053d-e806-49df-b351-59d68382c398" (UID: "8421053d-e806-49df-b351-59d68382c398"). InnerVolumeSpecName "kube-api-access-cgjtm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.573717 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-inventory" (OuterVolumeSpecName: "inventory") pod "8421053d-e806-49df-b351-59d68382c398" (UID: "8421053d-e806-49df-b351-59d68382c398"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.579481 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8421053d-e806-49df-b351-59d68382c398" (UID: "8421053d-e806-49df-b351-59d68382c398"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.644003 4932 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.644034 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.644048 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgjtm\" (UniqueName: \"kubernetes.io/projected/8421053d-e806-49df-b351-59d68382c398-kube-api-access-cgjtm\") on node \"crc\" DevicePath \"\"" Nov 25 10:53:55 crc kubenswrapper[4932]: I1125 10:53:55.644060 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8421053d-e806-49df-b351-59d68382c398-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.037698 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" event={"ID":"8421053d-e806-49df-b351-59d68382c398","Type":"ContainerDied","Data":"1248b39032800631edd790c298f6560380a4963f7edc12f6c16644924f9e5f28"} Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.038268 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1248b39032800631edd790c298f6560380a4963f7edc12f6c16644924f9e5f28" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.037936 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-bxp65" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.123585 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-phgpf"] Nov 25 10:53:56 crc kubenswrapper[4932]: E1125 10:53:56.124204 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8421053d-e806-49df-b351-59d68382c398" containerName="bootstrap-openstack-openstack-cell1" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.124229 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8421053d-e806-49df-b351-59d68382c398" containerName="bootstrap-openstack-openstack-cell1" Nov 25 10:53:56 crc kubenswrapper[4932]: E1125 10:53:56.124253 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6789b75-af75-47b3-b807-94d019acf4bd" containerName="extract-utilities" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.124263 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6789b75-af75-47b3-b807-94d019acf4bd" containerName="extract-utilities" Nov 25 10:53:56 crc kubenswrapper[4932]: E1125 10:53:56.124301 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6789b75-af75-47b3-b807-94d019acf4bd" containerName="registry-server" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.124310 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6789b75-af75-47b3-b807-94d019acf4bd" containerName="registry-server" Nov 25 10:53:56 crc kubenswrapper[4932]: E1125 10:53:56.124321 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6789b75-af75-47b3-b807-94d019acf4bd" containerName="extract-content" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.124328 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6789b75-af75-47b3-b807-94d019acf4bd" containerName="extract-content" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.124597 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="8421053d-e806-49df-b351-59d68382c398" containerName="bootstrap-openstack-openstack-cell1" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.124628 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6789b75-af75-47b3-b807-94d019acf4bd" containerName="registry-server" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.125553 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.127844 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.128488 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.131026 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.132016 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.144447 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-phgpf"] Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.261445 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f19b056a-9859-435b-8057-a07b11946b0e-ssh-key\") pod \"download-cache-openstack-openstack-cell1-phgpf\" (UID: \"f19b056a-9859-435b-8057-a07b11946b0e\") " pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.261541 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ng47h\" (UniqueName: \"kubernetes.io/projected/f19b056a-9859-435b-8057-a07b11946b0e-kube-api-access-ng47h\") pod \"download-cache-openstack-openstack-cell1-phgpf\" (UID: \"f19b056a-9859-435b-8057-a07b11946b0e\") " pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.261569 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f19b056a-9859-435b-8057-a07b11946b0e-inventory\") pod \"download-cache-openstack-openstack-cell1-phgpf\" (UID: \"f19b056a-9859-435b-8057-a07b11946b0e\") " pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.364107 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ng47h\" (UniqueName: \"kubernetes.io/projected/f19b056a-9859-435b-8057-a07b11946b0e-kube-api-access-ng47h\") pod \"download-cache-openstack-openstack-cell1-phgpf\" (UID: \"f19b056a-9859-435b-8057-a07b11946b0e\") " pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.364220 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f19b056a-9859-435b-8057-a07b11946b0e-inventory\") pod \"download-cache-openstack-openstack-cell1-phgpf\" (UID: \"f19b056a-9859-435b-8057-a07b11946b0e\") " pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.364587 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f19b056a-9859-435b-8057-a07b11946b0e-ssh-key\") pod \"download-cache-openstack-openstack-cell1-phgpf\" (UID: \"f19b056a-9859-435b-8057-a07b11946b0e\") " pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 
10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.372044 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f19b056a-9859-435b-8057-a07b11946b0e-ssh-key\") pod \"download-cache-openstack-openstack-cell1-phgpf\" (UID: \"f19b056a-9859-435b-8057-a07b11946b0e\") " pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.372771 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f19b056a-9859-435b-8057-a07b11946b0e-inventory\") pod \"download-cache-openstack-openstack-cell1-phgpf\" (UID: \"f19b056a-9859-435b-8057-a07b11946b0e\") " pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.399146 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ng47h\" (UniqueName: \"kubernetes.io/projected/f19b056a-9859-435b-8057-a07b11946b0e-kube-api-access-ng47h\") pod \"download-cache-openstack-openstack-cell1-phgpf\" (UID: \"f19b056a-9859-435b-8057-a07b11946b0e\") " pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 10:53:56 crc kubenswrapper[4932]: I1125 10:53:56.453478 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 10:53:57 crc kubenswrapper[4932]: I1125 10:53:57.036647 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-phgpf"] Nov 25 10:53:57 crc kubenswrapper[4932]: W1125 10:53:57.043114 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf19b056a_9859_435b_8057_a07b11946b0e.slice/crio-a9ec62684fc76e618d20b31acd41e50ae5b1f01eead49d98eaca0011efdb5835 WatchSource:0}: Error finding container a9ec62684fc76e618d20b31acd41e50ae5b1f01eead49d98eaca0011efdb5835: Status 404 returned error can't find the container with id a9ec62684fc76e618d20b31acd41e50ae5b1f01eead49d98eaca0011efdb5835 Nov 25 10:53:58 crc kubenswrapper[4932]: I1125 10:53:58.058152 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-phgpf" event={"ID":"f19b056a-9859-435b-8057-a07b11946b0e","Type":"ContainerStarted","Data":"fd20eabc7ab58df3b30a44c5a0557a4ccdae14b02d4d7946ec1fe906dc2e2a9b"} Nov 25 10:53:58 crc kubenswrapper[4932]: I1125 10:53:58.058523 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-phgpf" event={"ID":"f19b056a-9859-435b-8057-a07b11946b0e","Type":"ContainerStarted","Data":"a9ec62684fc76e618d20b31acd41e50ae5b1f01eead49d98eaca0011efdb5835"} Nov 25 10:53:58 crc kubenswrapper[4932]: I1125 10:53:58.079929 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-openstack-openstack-cell1-phgpf" podStartSLOduration=1.652012823 podStartE2EDuration="2.079912779s" podCreationTimestamp="2025-11-25 10:53:56 +0000 UTC" firstStartedPulling="2025-11-25 10:53:57.049280302 +0000 UTC m=+7497.175309865" lastFinishedPulling="2025-11-25 10:53:57.477180248 +0000 UTC m=+7497.603209821" observedRunningTime="2025-11-25 10:53:58.074533944 +0000 UTC m=+7498.200563517" watchObservedRunningTime="2025-11-25 10:53:58.079912779 +0000 UTC m=+7498.205942342" Nov 25 10:54:37 crc kubenswrapper[4932]: I1125 10:54:37.181185 4932 patch_prober.go:28] 
interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:54:37 crc kubenswrapper[4932]: I1125 10:54:37.181794 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:55:07 crc kubenswrapper[4932]: I1125 10:55:07.181509 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:55:07 crc kubenswrapper[4932]: I1125 10:55:07.182121 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:55:35 crc kubenswrapper[4932]: I1125 10:55:35.039457 4932 generic.go:334] "Generic (PLEG): container finished" podID="f19b056a-9859-435b-8057-a07b11946b0e" containerID="fd20eabc7ab58df3b30a44c5a0557a4ccdae14b02d4d7946ec1fe906dc2e2a9b" exitCode=0 Nov 25 10:55:35 crc kubenswrapper[4932]: I1125 10:55:35.039564 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-phgpf" event={"ID":"f19b056a-9859-435b-8057-a07b11946b0e","Type":"ContainerDied","Data":"fd20eabc7ab58df3b30a44c5a0557a4ccdae14b02d4d7946ec1fe906dc2e2a9b"} Nov 25 10:55:36 crc kubenswrapper[4932]: I1125 10:55:36.451528 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 10:55:36 crc kubenswrapper[4932]: I1125 10:55:36.637778 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f19b056a-9859-435b-8057-a07b11946b0e-inventory\") pod \"f19b056a-9859-435b-8057-a07b11946b0e\" (UID: \"f19b056a-9859-435b-8057-a07b11946b0e\") " Nov 25 10:55:36 crc kubenswrapper[4932]: I1125 10:55:36.637841 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f19b056a-9859-435b-8057-a07b11946b0e-ssh-key\") pod \"f19b056a-9859-435b-8057-a07b11946b0e\" (UID: \"f19b056a-9859-435b-8057-a07b11946b0e\") " Nov 25 10:55:36 crc kubenswrapper[4932]: I1125 10:55:36.637961 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ng47h\" (UniqueName: \"kubernetes.io/projected/f19b056a-9859-435b-8057-a07b11946b0e-kube-api-access-ng47h\") pod \"f19b056a-9859-435b-8057-a07b11946b0e\" (UID: \"f19b056a-9859-435b-8057-a07b11946b0e\") " Nov 25 10:55:36 crc kubenswrapper[4932]: I1125 10:55:36.645430 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f19b056a-9859-435b-8057-a07b11946b0e-kube-api-access-ng47h" (OuterVolumeSpecName: "kube-api-access-ng47h") pod "f19b056a-9859-435b-8057-a07b11946b0e" (UID: "f19b056a-9859-435b-8057-a07b11946b0e"). InnerVolumeSpecName "kube-api-access-ng47h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:55:36 crc kubenswrapper[4932]: I1125 10:55:36.673464 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f19b056a-9859-435b-8057-a07b11946b0e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f19b056a-9859-435b-8057-a07b11946b0e" (UID: "f19b056a-9859-435b-8057-a07b11946b0e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:55:36 crc kubenswrapper[4932]: I1125 10:55:36.674708 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f19b056a-9859-435b-8057-a07b11946b0e-inventory" (OuterVolumeSpecName: "inventory") pod "f19b056a-9859-435b-8057-a07b11946b0e" (UID: "f19b056a-9859-435b-8057-a07b11946b0e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:55:36 crc kubenswrapper[4932]: I1125 10:55:36.741502 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f19b056a-9859-435b-8057-a07b11946b0e-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:55:36 crc kubenswrapper[4932]: I1125 10:55:36.741531 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f19b056a-9859-435b-8057-a07b11946b0e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:55:36 crc kubenswrapper[4932]: I1125 10:55:36.741541 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ng47h\" (UniqueName: \"kubernetes.io/projected/f19b056a-9859-435b-8057-a07b11946b0e-kube-api-access-ng47h\") on node \"crc\" DevicePath \"\"" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.060820 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-phgpf" event={"ID":"f19b056a-9859-435b-8057-a07b11946b0e","Type":"ContainerDied","Data":"a9ec62684fc76e618d20b31acd41e50ae5b1f01eead49d98eaca0011efdb5835"} Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.060909 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9ec62684fc76e618d20b31acd41e50ae5b1f01eead49d98eaca0011efdb5835" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.060959 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-phgpf" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.153242 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-c6dw6"] Nov 25 10:55:37 crc kubenswrapper[4932]: E1125 10:55:37.153737 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f19b056a-9859-435b-8057-a07b11946b0e" containerName="download-cache-openstack-openstack-cell1" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.153753 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f19b056a-9859-435b-8057-a07b11946b0e" containerName="download-cache-openstack-openstack-cell1" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.153945 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f19b056a-9859-435b-8057-a07b11946b0e" containerName="download-cache-openstack-openstack-cell1" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.154702 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.156582 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.156988 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.157089 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.157086 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.170901 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-c6dw6"] Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.189932 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.190000 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.190054 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.190921 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f25878df9a1b0709ed13ec05916688940d18560b65912b6aa892c7e65177215a"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.190970 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://f25878df9a1b0709ed13ec05916688940d18560b65912b6aa892c7e65177215a" gracePeriod=600 Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.252460 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/154a3ab1-f234-471d-9b63-6999db7f0af4-inventory\") pod \"configure-network-openstack-openstack-cell1-c6dw6\" (UID: \"154a3ab1-f234-471d-9b63-6999db7f0af4\") " pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.252722 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/154a3ab1-f234-471d-9b63-6999db7f0af4-ssh-key\") pod \"configure-network-openstack-openstack-cell1-c6dw6\" (UID: \"154a3ab1-f234-471d-9b63-6999db7f0af4\") 
" pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.252807 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsrp5\" (UniqueName: \"kubernetes.io/projected/154a3ab1-f234-471d-9b63-6999db7f0af4-kube-api-access-nsrp5\") pod \"configure-network-openstack-openstack-cell1-c6dw6\" (UID: \"154a3ab1-f234-471d-9b63-6999db7f0af4\") " pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.354472 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/154a3ab1-f234-471d-9b63-6999db7f0af4-ssh-key\") pod \"configure-network-openstack-openstack-cell1-c6dw6\" (UID: \"154a3ab1-f234-471d-9b63-6999db7f0af4\") " pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.354858 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsrp5\" (UniqueName: \"kubernetes.io/projected/154a3ab1-f234-471d-9b63-6999db7f0af4-kube-api-access-nsrp5\") pod \"configure-network-openstack-openstack-cell1-c6dw6\" (UID: \"154a3ab1-f234-471d-9b63-6999db7f0af4\") " pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.355034 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/154a3ab1-f234-471d-9b63-6999db7f0af4-inventory\") pod \"configure-network-openstack-openstack-cell1-c6dw6\" (UID: \"154a3ab1-f234-471d-9b63-6999db7f0af4\") " pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.359885 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/154a3ab1-f234-471d-9b63-6999db7f0af4-ssh-key\") pod \"configure-network-openstack-openstack-cell1-c6dw6\" (UID: \"154a3ab1-f234-471d-9b63-6999db7f0af4\") " pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.361441 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/154a3ab1-f234-471d-9b63-6999db7f0af4-inventory\") pod \"configure-network-openstack-openstack-cell1-c6dw6\" (UID: \"154a3ab1-f234-471d-9b63-6999db7f0af4\") " pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.385169 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsrp5\" (UniqueName: \"kubernetes.io/projected/154a3ab1-f234-471d-9b63-6999db7f0af4-kube-api-access-nsrp5\") pod \"configure-network-openstack-openstack-cell1-c6dw6\" (UID: \"154a3ab1-f234-471d-9b63-6999db7f0af4\") " pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:55:37 crc kubenswrapper[4932]: I1125 10:55:37.471429 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:55:38 crc kubenswrapper[4932]: I1125 10:55:38.041301 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-c6dw6"] Nov 25 10:55:38 crc kubenswrapper[4932]: W1125 10:55:38.044501 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod154a3ab1_f234_471d_9b63_6999db7f0af4.slice/crio-6f7b2dc8057f522817b805e77574f96738d30cf874897eb19f3cd3b0e9016df5 WatchSource:0}: Error finding container 6f7b2dc8057f522817b805e77574f96738d30cf874897eb19f3cd3b0e9016df5: Status 404 returned error can't find the container with id 6f7b2dc8057f522817b805e77574f96738d30cf874897eb19f3cd3b0e9016df5 Nov 25 10:55:38 crc kubenswrapper[4932]: I1125 10:55:38.072023 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="f25878df9a1b0709ed13ec05916688940d18560b65912b6aa892c7e65177215a" exitCode=0 Nov 25 10:55:38 crc kubenswrapper[4932]: I1125 10:55:38.072101 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"f25878df9a1b0709ed13ec05916688940d18560b65912b6aa892c7e65177215a"} Nov 25 10:55:38 crc kubenswrapper[4932]: I1125 10:55:38.072150 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b"} Nov 25 10:55:38 crc kubenswrapper[4932]: I1125 10:55:38.072175 4932 scope.go:117] "RemoveContainer" containerID="52472e0cf9d6c62b78636dcfdd22815660a4e579a8bd2bfec3516b360dcbfc66" Nov 25 10:55:38 crc kubenswrapper[4932]: I1125 10:55:38.073932 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" event={"ID":"154a3ab1-f234-471d-9b63-6999db7f0af4","Type":"ContainerStarted","Data":"6f7b2dc8057f522817b805e77574f96738d30cf874897eb19f3cd3b0e9016df5"} Nov 25 10:55:39 crc kubenswrapper[4932]: I1125 10:55:39.087396 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" event={"ID":"154a3ab1-f234-471d-9b63-6999db7f0af4","Type":"ContainerStarted","Data":"58fa4a6b07f0ede0ed1b10e39046b3d087654fc9080c3c03a0ef680d7db07f5b"} Nov 25 10:55:39 crc kubenswrapper[4932]: I1125 10:55:39.110602 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" podStartSLOduration=1.6890146769999999 podStartE2EDuration="2.110579951s" podCreationTimestamp="2025-11-25 10:55:37 +0000 UTC" firstStartedPulling="2025-11-25 10:55:38.047525014 +0000 UTC m=+7598.173554577" lastFinishedPulling="2025-11-25 10:55:38.469090238 +0000 UTC m=+7598.595119851" observedRunningTime="2025-11-25 10:55:39.107972176 +0000 UTC m=+7599.234001749" watchObservedRunningTime="2025-11-25 10:55:39.110579951 +0000 UTC m=+7599.236609514" Nov 25 10:56:24 crc kubenswrapper[4932]: I1125 10:56:24.800055 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xzz94"] Nov 25 10:56:24 crc kubenswrapper[4932]: I1125 10:56:24.802781 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:24 crc kubenswrapper[4932]: I1125 10:56:24.820870 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xzz94"] Nov 25 10:56:24 crc kubenswrapper[4932]: I1125 10:56:24.937259 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/978a06bf-e39c-42ce-b417-e8a1648a6f7f-catalog-content\") pod \"community-operators-xzz94\" (UID: \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\") " pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:24 crc kubenswrapper[4932]: I1125 10:56:24.937554 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/978a06bf-e39c-42ce-b417-e8a1648a6f7f-utilities\") pod \"community-operators-xzz94\" (UID: \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\") " pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:24 crc kubenswrapper[4932]: I1125 10:56:24.937634 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsws6\" (UniqueName: \"kubernetes.io/projected/978a06bf-e39c-42ce-b417-e8a1648a6f7f-kube-api-access-gsws6\") pod \"community-operators-xzz94\" (UID: \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\") " pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:25 crc kubenswrapper[4932]: I1125 10:56:25.039006 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/978a06bf-e39c-42ce-b417-e8a1648a6f7f-utilities\") pod \"community-operators-xzz94\" (UID: \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\") " pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:25 crc kubenswrapper[4932]: I1125 10:56:25.039531 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/978a06bf-e39c-42ce-b417-e8a1648a6f7f-utilities\") pod \"community-operators-xzz94\" (UID: \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\") " pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:25 crc kubenswrapper[4932]: I1125 10:56:25.039648 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsws6\" (UniqueName: \"kubernetes.io/projected/978a06bf-e39c-42ce-b417-e8a1648a6f7f-kube-api-access-gsws6\") pod \"community-operators-xzz94\" (UID: \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\") " pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:25 crc kubenswrapper[4932]: I1125 10:56:25.039744 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/978a06bf-e39c-42ce-b417-e8a1648a6f7f-catalog-content\") pod \"community-operators-xzz94\" (UID: \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\") " pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:25 crc kubenswrapper[4932]: I1125 10:56:25.040031 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/978a06bf-e39c-42ce-b417-e8a1648a6f7f-catalog-content\") pod \"community-operators-xzz94\" (UID: \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\") " pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:25 crc kubenswrapper[4932]: I1125 10:56:25.072597 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gsws6\" (UniqueName: \"kubernetes.io/projected/978a06bf-e39c-42ce-b417-e8a1648a6f7f-kube-api-access-gsws6\") pod \"community-operators-xzz94\" (UID: \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\") " pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:25 crc kubenswrapper[4932]: I1125 10:56:25.127341 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:25 crc kubenswrapper[4932]: I1125 10:56:25.675523 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xzz94"] Nov 25 10:56:26 crc kubenswrapper[4932]: I1125 10:56:26.548553 4932 generic.go:334] "Generic (PLEG): container finished" podID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" containerID="1da84e978ce18353665e8bf032af0c41c47124131340ef878ab92e92fbf2f6b5" exitCode=0 Nov 25 10:56:26 crc kubenswrapper[4932]: I1125 10:56:26.548635 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xzz94" event={"ID":"978a06bf-e39c-42ce-b417-e8a1648a6f7f","Type":"ContainerDied","Data":"1da84e978ce18353665e8bf032af0c41c47124131340ef878ab92e92fbf2f6b5"} Nov 25 10:56:26 crc kubenswrapper[4932]: I1125 10:56:26.549048 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xzz94" event={"ID":"978a06bf-e39c-42ce-b417-e8a1648a6f7f","Type":"ContainerStarted","Data":"a909560fe25fc862c0b380f9e528f1bb2df1f02bdb257679220e8b80219aa1f1"} Nov 25 10:56:26 crc kubenswrapper[4932]: I1125 10:56:26.553711 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 10:56:28 crc kubenswrapper[4932]: I1125 10:56:28.576273 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xzz94" event={"ID":"978a06bf-e39c-42ce-b417-e8a1648a6f7f","Type":"ContainerStarted","Data":"56f20fc7e5777409bfe13f91bb859bb73d9772f38cc27e6029b1a42bb49dbfc4"} Nov 25 10:56:31 crc kubenswrapper[4932]: I1125 10:56:31.603064 4932 generic.go:334] "Generic (PLEG): container finished" podID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" containerID="56f20fc7e5777409bfe13f91bb859bb73d9772f38cc27e6029b1a42bb49dbfc4" exitCode=0 Nov 25 10:56:31 crc kubenswrapper[4932]: I1125 10:56:31.603156 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xzz94" event={"ID":"978a06bf-e39c-42ce-b417-e8a1648a6f7f","Type":"ContainerDied","Data":"56f20fc7e5777409bfe13f91bb859bb73d9772f38cc27e6029b1a42bb49dbfc4"} Nov 25 10:56:33 crc kubenswrapper[4932]: I1125 10:56:33.621178 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xzz94" event={"ID":"978a06bf-e39c-42ce-b417-e8a1648a6f7f","Type":"ContainerStarted","Data":"3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c"} Nov 25 10:56:33 crc kubenswrapper[4932]: I1125 10:56:33.652500 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xzz94" podStartSLOduration=3.204387196 podStartE2EDuration="9.652476684s" podCreationTimestamp="2025-11-25 10:56:24 +0000 UTC" firstStartedPulling="2025-11-25 10:56:26.553479003 +0000 UTC m=+7646.679508556" lastFinishedPulling="2025-11-25 10:56:33.001568481 +0000 UTC m=+7653.127598044" observedRunningTime="2025-11-25 10:56:33.645615967 +0000 UTC m=+7653.771645550" watchObservedRunningTime="2025-11-25 
10:56:33.652476684 +0000 UTC m=+7653.778506257" Nov 25 10:56:35 crc kubenswrapper[4932]: I1125 10:56:35.128309 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:35 crc kubenswrapper[4932]: I1125 10:56:35.128667 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:36 crc kubenswrapper[4932]: I1125 10:56:36.183562 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-xzz94" podUID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" containerName="registry-server" probeResult="failure" output=< Nov 25 10:56:36 crc kubenswrapper[4932]: timeout: failed to connect service ":50051" within 1s Nov 25 10:56:36 crc kubenswrapper[4932]: > Nov 25 10:56:45 crc kubenswrapper[4932]: I1125 10:56:45.196878 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:45 crc kubenswrapper[4932]: I1125 10:56:45.251423 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:45 crc kubenswrapper[4932]: I1125 10:56:45.433922 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xzz94"] Nov 25 10:56:46 crc kubenswrapper[4932]: I1125 10:56:46.742288 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xzz94" podUID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" containerName="registry-server" containerID="cri-o://3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c" gracePeriod=2 Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.315934 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.470235 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/978a06bf-e39c-42ce-b417-e8a1648a6f7f-utilities\") pod \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\" (UID: \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\") " Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.470399 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gsws6\" (UniqueName: \"kubernetes.io/projected/978a06bf-e39c-42ce-b417-e8a1648a6f7f-kube-api-access-gsws6\") pod \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\" (UID: \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\") " Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.470797 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/978a06bf-e39c-42ce-b417-e8a1648a6f7f-catalog-content\") pod \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\" (UID: \"978a06bf-e39c-42ce-b417-e8a1648a6f7f\") " Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.471321 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/978a06bf-e39c-42ce-b417-e8a1648a6f7f-utilities" (OuterVolumeSpecName: "utilities") pod "978a06bf-e39c-42ce-b417-e8a1648a6f7f" (UID: "978a06bf-e39c-42ce-b417-e8a1648a6f7f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.471880 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/978a06bf-e39c-42ce-b417-e8a1648a6f7f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.478649 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/978a06bf-e39c-42ce-b417-e8a1648a6f7f-kube-api-access-gsws6" (OuterVolumeSpecName: "kube-api-access-gsws6") pod "978a06bf-e39c-42ce-b417-e8a1648a6f7f" (UID: "978a06bf-e39c-42ce-b417-e8a1648a6f7f"). InnerVolumeSpecName "kube-api-access-gsws6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.527684 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/978a06bf-e39c-42ce-b417-e8a1648a6f7f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "978a06bf-e39c-42ce-b417-e8a1648a6f7f" (UID: "978a06bf-e39c-42ce-b417-e8a1648a6f7f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.573462 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gsws6\" (UniqueName: \"kubernetes.io/projected/978a06bf-e39c-42ce-b417-e8a1648a6f7f-kube-api-access-gsws6\") on node \"crc\" DevicePath \"\"" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.573501 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/978a06bf-e39c-42ce-b417-e8a1648a6f7f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.760065 4932 generic.go:334] "Generic (PLEG): container finished" podID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" containerID="3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c" exitCode=0 Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.760115 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xzz94" event={"ID":"978a06bf-e39c-42ce-b417-e8a1648a6f7f","Type":"ContainerDied","Data":"3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c"} Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.760124 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xzz94" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.760151 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xzz94" event={"ID":"978a06bf-e39c-42ce-b417-e8a1648a6f7f","Type":"ContainerDied","Data":"a909560fe25fc862c0b380f9e528f1bb2df1f02bdb257679220e8b80219aa1f1"} Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.760174 4932 scope.go:117] "RemoveContainer" containerID="3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.786558 4932 scope.go:117] "RemoveContainer" containerID="56f20fc7e5777409bfe13f91bb859bb73d9772f38cc27e6029b1a42bb49dbfc4" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.796653 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xzz94"] Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.809577 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xzz94"] Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.840771 4932 scope.go:117] "RemoveContainer" containerID="1da84e978ce18353665e8bf032af0c41c47124131340ef878ab92e92fbf2f6b5" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.861753 4932 scope.go:117] "RemoveContainer" containerID="3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c" Nov 25 10:56:47 crc kubenswrapper[4932]: E1125 10:56:47.862166 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c\": container with ID starting with 3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c not found: ID does not exist" containerID="3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.862236 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c"} err="failed to get container status \"3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c\": rpc error: code = NotFound desc = could not find container \"3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c\": container with ID starting with 3815c8d929555e4f1d2011f8ea275214500e4f90d5dae7b8dab66f390bdfa07c not found: ID does not exist" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.862264 4932 scope.go:117] "RemoveContainer" containerID="56f20fc7e5777409bfe13f91bb859bb73d9772f38cc27e6029b1a42bb49dbfc4" Nov 25 10:56:47 crc kubenswrapper[4932]: E1125 10:56:47.862612 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56f20fc7e5777409bfe13f91bb859bb73d9772f38cc27e6029b1a42bb49dbfc4\": container with ID starting with 56f20fc7e5777409bfe13f91bb859bb73d9772f38cc27e6029b1a42bb49dbfc4 not found: ID does not exist" containerID="56f20fc7e5777409bfe13f91bb859bb73d9772f38cc27e6029b1a42bb49dbfc4" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.862645 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56f20fc7e5777409bfe13f91bb859bb73d9772f38cc27e6029b1a42bb49dbfc4"} err="failed to get container status \"56f20fc7e5777409bfe13f91bb859bb73d9772f38cc27e6029b1a42bb49dbfc4\": rpc error: code = NotFound desc = could not find 
container \"56f20fc7e5777409bfe13f91bb859bb73d9772f38cc27e6029b1a42bb49dbfc4\": container with ID starting with 56f20fc7e5777409bfe13f91bb859bb73d9772f38cc27e6029b1a42bb49dbfc4 not found: ID does not exist" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.862664 4932 scope.go:117] "RemoveContainer" containerID="1da84e978ce18353665e8bf032af0c41c47124131340ef878ab92e92fbf2f6b5" Nov 25 10:56:47 crc kubenswrapper[4932]: E1125 10:56:47.862839 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1da84e978ce18353665e8bf032af0c41c47124131340ef878ab92e92fbf2f6b5\": container with ID starting with 1da84e978ce18353665e8bf032af0c41c47124131340ef878ab92e92fbf2f6b5 not found: ID does not exist" containerID="1da84e978ce18353665e8bf032af0c41c47124131340ef878ab92e92fbf2f6b5" Nov 25 10:56:47 crc kubenswrapper[4932]: I1125 10:56:47.863007 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1da84e978ce18353665e8bf032af0c41c47124131340ef878ab92e92fbf2f6b5"} err="failed to get container status \"1da84e978ce18353665e8bf032af0c41c47124131340ef878ab92e92fbf2f6b5\": rpc error: code = NotFound desc = could not find container \"1da84e978ce18353665e8bf032af0c41c47124131340ef878ab92e92fbf2f6b5\": container with ID starting with 1da84e978ce18353665e8bf032af0c41c47124131340ef878ab92e92fbf2f6b5 not found: ID does not exist" Nov 25 10:56:48 crc kubenswrapper[4932]: I1125 10:56:48.617367 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" path="/var/lib/kubelet/pods/978a06bf-e39c-42ce-b417-e8a1648a6f7f/volumes" Nov 25 10:56:56 crc kubenswrapper[4932]: I1125 10:56:56.863075 4932 generic.go:334] "Generic (PLEG): container finished" podID="154a3ab1-f234-471d-9b63-6999db7f0af4" containerID="58fa4a6b07f0ede0ed1b10e39046b3d087654fc9080c3c03a0ef680d7db07f5b" exitCode=0 Nov 25 10:56:56 crc kubenswrapper[4932]: I1125 10:56:56.863205 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" event={"ID":"154a3ab1-f234-471d-9b63-6999db7f0af4","Type":"ContainerDied","Data":"58fa4a6b07f0ede0ed1b10e39046b3d087654fc9080c3c03a0ef680d7db07f5b"} Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.308357 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.428724 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/154a3ab1-f234-471d-9b63-6999db7f0af4-inventory\") pod \"154a3ab1-f234-471d-9b63-6999db7f0af4\" (UID: \"154a3ab1-f234-471d-9b63-6999db7f0af4\") " Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.428775 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/154a3ab1-f234-471d-9b63-6999db7f0af4-ssh-key\") pod \"154a3ab1-f234-471d-9b63-6999db7f0af4\" (UID: \"154a3ab1-f234-471d-9b63-6999db7f0af4\") " Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.429056 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nsrp5\" (UniqueName: \"kubernetes.io/projected/154a3ab1-f234-471d-9b63-6999db7f0af4-kube-api-access-nsrp5\") pod \"154a3ab1-f234-471d-9b63-6999db7f0af4\" (UID: \"154a3ab1-f234-471d-9b63-6999db7f0af4\") " Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.434166 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/154a3ab1-f234-471d-9b63-6999db7f0af4-kube-api-access-nsrp5" (OuterVolumeSpecName: "kube-api-access-nsrp5") pod "154a3ab1-f234-471d-9b63-6999db7f0af4" (UID: "154a3ab1-f234-471d-9b63-6999db7f0af4"). InnerVolumeSpecName "kube-api-access-nsrp5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.461377 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/154a3ab1-f234-471d-9b63-6999db7f0af4-inventory" (OuterVolumeSpecName: "inventory") pod "154a3ab1-f234-471d-9b63-6999db7f0af4" (UID: "154a3ab1-f234-471d-9b63-6999db7f0af4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.465156 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/154a3ab1-f234-471d-9b63-6999db7f0af4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "154a3ab1-f234-471d-9b63-6999db7f0af4" (UID: "154a3ab1-f234-471d-9b63-6999db7f0af4"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.531703 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/154a3ab1-f234-471d-9b63-6999db7f0af4-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.531736 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/154a3ab1-f234-471d-9b63-6999db7f0af4-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.531747 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nsrp5\" (UniqueName: \"kubernetes.io/projected/154a3ab1-f234-471d-9b63-6999db7f0af4-kube-api-access-nsrp5\") on node \"crc\" DevicePath \"\"" Nov 25 10:56:58 crc kubenswrapper[4932]: E1125 10:56:58.843896 4932 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod154a3ab1_f234_471d_9b63_6999db7f0af4.slice/crio-6f7b2dc8057f522817b805e77574f96738d30cf874897eb19f3cd3b0e9016df5\": RecentStats: unable to find data in memory cache]" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.882764 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" event={"ID":"154a3ab1-f234-471d-9b63-6999db7f0af4","Type":"ContainerDied","Data":"6f7b2dc8057f522817b805e77574f96738d30cf874897eb19f3cd3b0e9016df5"} Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.883129 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f7b2dc8057f522817b805e77574f96738d30cf874897eb19f3cd3b0e9016df5" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.882822 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-c6dw6" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.967162 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-8pm98"] Nov 25 10:56:58 crc kubenswrapper[4932]: E1125 10:56:58.967856 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" containerName="extract-content" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.967950 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" containerName="extract-content" Nov 25 10:56:58 crc kubenswrapper[4932]: E1125 10:56:58.968028 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" containerName="registry-server" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.968098 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" containerName="registry-server" Nov 25 10:56:58 crc kubenswrapper[4932]: E1125 10:56:58.968177 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="154a3ab1-f234-471d-9b63-6999db7f0af4" containerName="configure-network-openstack-openstack-cell1" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.968280 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="154a3ab1-f234-471d-9b63-6999db7f0af4" containerName="configure-network-openstack-openstack-cell1" Nov 25 10:56:58 crc kubenswrapper[4932]: E1125 10:56:58.968371 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" containerName="extract-utilities" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.968421 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" containerName="extract-utilities" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.968725 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="154a3ab1-f234-471d-9b63-6999db7f0af4" containerName="configure-network-openstack-openstack-cell1" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.968803 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="978a06bf-e39c-42ce-b417-e8a1648a6f7f" containerName="registry-server" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.969829 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.971838 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.971999 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.972138 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.972779 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 10:56:58 crc kubenswrapper[4932]: I1125 10:56:58.990870 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-8pm98"] Nov 25 10:56:59 crc kubenswrapper[4932]: I1125 10:56:59.148131 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxrvb\" (UniqueName: \"kubernetes.io/projected/2a889be6-1b85-45ae-806c-7f7d64e5360c-kube-api-access-xxrvb\") pod \"validate-network-openstack-openstack-cell1-8pm98\" (UID: \"2a889be6-1b85-45ae-806c-7f7d64e5360c\") " pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:56:59 crc kubenswrapper[4932]: I1125 10:56:59.148298 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a889be6-1b85-45ae-806c-7f7d64e5360c-inventory\") pod \"validate-network-openstack-openstack-cell1-8pm98\" (UID: \"2a889be6-1b85-45ae-806c-7f7d64e5360c\") " pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:56:59 crc kubenswrapper[4932]: I1125 10:56:59.148361 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a889be6-1b85-45ae-806c-7f7d64e5360c-ssh-key\") pod \"validate-network-openstack-openstack-cell1-8pm98\" (UID: \"2a889be6-1b85-45ae-806c-7f7d64e5360c\") " pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:56:59 crc kubenswrapper[4932]: I1125 10:56:59.250447 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxrvb\" (UniqueName: \"kubernetes.io/projected/2a889be6-1b85-45ae-806c-7f7d64e5360c-kube-api-access-xxrvb\") pod \"validate-network-openstack-openstack-cell1-8pm98\" (UID: \"2a889be6-1b85-45ae-806c-7f7d64e5360c\") " pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:56:59 crc kubenswrapper[4932]: I1125 10:56:59.250554 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a889be6-1b85-45ae-806c-7f7d64e5360c-inventory\") pod \"validate-network-openstack-openstack-cell1-8pm98\" (UID: \"2a889be6-1b85-45ae-806c-7f7d64e5360c\") " pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:56:59 crc kubenswrapper[4932]: I1125 10:56:59.250599 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a889be6-1b85-45ae-806c-7f7d64e5360c-ssh-key\") pod \"validate-network-openstack-openstack-cell1-8pm98\" (UID: \"2a889be6-1b85-45ae-806c-7f7d64e5360c\") " 
pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:56:59 crc kubenswrapper[4932]: I1125 10:56:59.260367 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a889be6-1b85-45ae-806c-7f7d64e5360c-inventory\") pod \"validate-network-openstack-openstack-cell1-8pm98\" (UID: \"2a889be6-1b85-45ae-806c-7f7d64e5360c\") " pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:56:59 crc kubenswrapper[4932]: I1125 10:56:59.261042 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a889be6-1b85-45ae-806c-7f7d64e5360c-ssh-key\") pod \"validate-network-openstack-openstack-cell1-8pm98\" (UID: \"2a889be6-1b85-45ae-806c-7f7d64e5360c\") " pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:56:59 crc kubenswrapper[4932]: I1125 10:56:59.267688 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxrvb\" (UniqueName: \"kubernetes.io/projected/2a889be6-1b85-45ae-806c-7f7d64e5360c-kube-api-access-xxrvb\") pod \"validate-network-openstack-openstack-cell1-8pm98\" (UID: \"2a889be6-1b85-45ae-806c-7f7d64e5360c\") " pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:56:59 crc kubenswrapper[4932]: I1125 10:56:59.289048 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:56:59 crc kubenswrapper[4932]: I1125 10:56:59.826715 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-8pm98"] Nov 25 10:56:59 crc kubenswrapper[4932]: W1125 10:56:59.835230 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a889be6_1b85_45ae_806c_7f7d64e5360c.slice/crio-82269c300853793e89546d8f7d1c54d78f281171a38a53cbfa7a6757930c4647 WatchSource:0}: Error finding container 82269c300853793e89546d8f7d1c54d78f281171a38a53cbfa7a6757930c4647: Status 404 returned error can't find the container with id 82269c300853793e89546d8f7d1c54d78f281171a38a53cbfa7a6757930c4647 Nov 25 10:56:59 crc kubenswrapper[4932]: I1125 10:56:59.894174 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-8pm98" event={"ID":"2a889be6-1b85-45ae-806c-7f7d64e5360c","Type":"ContainerStarted","Data":"82269c300853793e89546d8f7d1c54d78f281171a38a53cbfa7a6757930c4647"} Nov 25 10:57:01 crc kubenswrapper[4932]: I1125 10:57:01.221424 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:57:01 crc kubenswrapper[4932]: I1125 10:57:01.930797 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-8pm98" event={"ID":"2a889be6-1b85-45ae-806c-7f7d64e5360c","Type":"ContainerStarted","Data":"9b5ae186b9cb7fd0707d61618256d090af97b13b0b9aafe1b1af934356db757c"} Nov 25 10:57:01 crc kubenswrapper[4932]: I1125 10:57:01.961509 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-openstack-openstack-cell1-8pm98" podStartSLOduration=2.583125517 podStartE2EDuration="3.961489449s" podCreationTimestamp="2025-11-25 10:56:58 +0000 UTC" firstStartedPulling="2025-11-25 10:56:59.838665951 +0000 UTC m=+7679.964695514" lastFinishedPulling="2025-11-25 10:57:01.217029883 +0000 UTC 
m=+7681.343059446" observedRunningTime="2025-11-25 10:57:01.954741156 +0000 UTC m=+7682.080770719" watchObservedRunningTime="2025-11-25 10:57:01.961489449 +0000 UTC m=+7682.087519012" Nov 25 10:57:06 crc kubenswrapper[4932]: I1125 10:57:06.981267 4932 generic.go:334] "Generic (PLEG): container finished" podID="2a889be6-1b85-45ae-806c-7f7d64e5360c" containerID="9b5ae186b9cb7fd0707d61618256d090af97b13b0b9aafe1b1af934356db757c" exitCode=0 Nov 25 10:57:06 crc kubenswrapper[4932]: I1125 10:57:06.981303 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-8pm98" event={"ID":"2a889be6-1b85-45ae-806c-7f7d64e5360c","Type":"ContainerDied","Data":"9b5ae186b9cb7fd0707d61618256d090af97b13b0b9aafe1b1af934356db757c"} Nov 25 10:57:08 crc kubenswrapper[4932]: I1125 10:57:08.439442 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:57:08 crc kubenswrapper[4932]: I1125 10:57:08.559417 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxrvb\" (UniqueName: \"kubernetes.io/projected/2a889be6-1b85-45ae-806c-7f7d64e5360c-kube-api-access-xxrvb\") pod \"2a889be6-1b85-45ae-806c-7f7d64e5360c\" (UID: \"2a889be6-1b85-45ae-806c-7f7d64e5360c\") " Nov 25 10:57:08 crc kubenswrapper[4932]: I1125 10:57:08.559605 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a889be6-1b85-45ae-806c-7f7d64e5360c-ssh-key\") pod \"2a889be6-1b85-45ae-806c-7f7d64e5360c\" (UID: \"2a889be6-1b85-45ae-806c-7f7d64e5360c\") " Nov 25 10:57:08 crc kubenswrapper[4932]: I1125 10:57:08.559708 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a889be6-1b85-45ae-806c-7f7d64e5360c-inventory\") pod \"2a889be6-1b85-45ae-806c-7f7d64e5360c\" (UID: \"2a889be6-1b85-45ae-806c-7f7d64e5360c\") " Nov 25 10:57:08 crc kubenswrapper[4932]: I1125 10:57:08.576519 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a889be6-1b85-45ae-806c-7f7d64e5360c-kube-api-access-xxrvb" (OuterVolumeSpecName: "kube-api-access-xxrvb") pod "2a889be6-1b85-45ae-806c-7f7d64e5360c" (UID: "2a889be6-1b85-45ae-806c-7f7d64e5360c"). InnerVolumeSpecName "kube-api-access-xxrvb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:57:08 crc kubenswrapper[4932]: I1125 10:57:08.593557 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a889be6-1b85-45ae-806c-7f7d64e5360c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2a889be6-1b85-45ae-806c-7f7d64e5360c" (UID: "2a889be6-1b85-45ae-806c-7f7d64e5360c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:57:08 crc kubenswrapper[4932]: I1125 10:57:08.594554 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a889be6-1b85-45ae-806c-7f7d64e5360c-inventory" (OuterVolumeSpecName: "inventory") pod "2a889be6-1b85-45ae-806c-7f7d64e5360c" (UID: "2a889be6-1b85-45ae-806c-7f7d64e5360c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:57:08 crc kubenswrapper[4932]: I1125 10:57:08.662243 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxrvb\" (UniqueName: \"kubernetes.io/projected/2a889be6-1b85-45ae-806c-7f7d64e5360c-kube-api-access-xxrvb\") on node \"crc\" DevicePath \"\"" Nov 25 10:57:08 crc kubenswrapper[4932]: I1125 10:57:08.662497 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a889be6-1b85-45ae-806c-7f7d64e5360c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:57:08 crc kubenswrapper[4932]: I1125 10:57:08.662507 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a889be6-1b85-45ae-806c-7f7d64e5360c-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.004739 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-8pm98" event={"ID":"2a889be6-1b85-45ae-806c-7f7d64e5360c","Type":"ContainerDied","Data":"82269c300853793e89546d8f7d1c54d78f281171a38a53cbfa7a6757930c4647"} Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.004836 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="82269c300853793e89546d8f7d1c54d78f281171a38a53cbfa7a6757930c4647" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.004913 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-8pm98" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.085548 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-openstack-openstack-cell1-v4vg2"] Nov 25 10:57:09 crc kubenswrapper[4932]: E1125 10:57:09.086803 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a889be6-1b85-45ae-806c-7f7d64e5360c" containerName="validate-network-openstack-openstack-cell1" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.086901 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a889be6-1b85-45ae-806c-7f7d64e5360c" containerName="validate-network-openstack-openstack-cell1" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.087337 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a889be6-1b85-45ae-806c-7f7d64e5360c" containerName="validate-network-openstack-openstack-cell1" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.093876 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.097654 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.098106 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.098375 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.108782 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-v4vg2"] Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.110943 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.174347 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gn6lr\" (UniqueName: \"kubernetes.io/projected/4396feb3-09a0-4a90-94f6-6494483a4e94-kube-api-access-gn6lr\") pod \"install-os-openstack-openstack-cell1-v4vg2\" (UID: \"4396feb3-09a0-4a90-94f6-6494483a4e94\") " pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.174684 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4396feb3-09a0-4a90-94f6-6494483a4e94-inventory\") pod \"install-os-openstack-openstack-cell1-v4vg2\" (UID: \"4396feb3-09a0-4a90-94f6-6494483a4e94\") " pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.174818 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4396feb3-09a0-4a90-94f6-6494483a4e94-ssh-key\") pod \"install-os-openstack-openstack-cell1-v4vg2\" (UID: \"4396feb3-09a0-4a90-94f6-6494483a4e94\") " pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.277609 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4396feb3-09a0-4a90-94f6-6494483a4e94-inventory\") pod \"install-os-openstack-openstack-cell1-v4vg2\" (UID: \"4396feb3-09a0-4a90-94f6-6494483a4e94\") " pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.277725 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4396feb3-09a0-4a90-94f6-6494483a4e94-ssh-key\") pod \"install-os-openstack-openstack-cell1-v4vg2\" (UID: \"4396feb3-09a0-4a90-94f6-6494483a4e94\") " pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.277771 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gn6lr\" (UniqueName: \"kubernetes.io/projected/4396feb3-09a0-4a90-94f6-6494483a4e94-kube-api-access-gn6lr\") pod \"install-os-openstack-openstack-cell1-v4vg2\" (UID: \"4396feb3-09a0-4a90-94f6-6494483a4e94\") " pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.282808 4932 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4396feb3-09a0-4a90-94f6-6494483a4e94-inventory\") pod \"install-os-openstack-openstack-cell1-v4vg2\" (UID: \"4396feb3-09a0-4a90-94f6-6494483a4e94\") " pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.283293 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4396feb3-09a0-4a90-94f6-6494483a4e94-ssh-key\") pod \"install-os-openstack-openstack-cell1-v4vg2\" (UID: \"4396feb3-09a0-4a90-94f6-6494483a4e94\") " pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.295924 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gn6lr\" (UniqueName: \"kubernetes.io/projected/4396feb3-09a0-4a90-94f6-6494483a4e94-kube-api-access-gn6lr\") pod \"install-os-openstack-openstack-cell1-v4vg2\" (UID: \"4396feb3-09a0-4a90-94f6-6494483a4e94\") " pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.430998 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:09 crc kubenswrapper[4932]: I1125 10:57:09.969149 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-v4vg2"] Nov 25 10:57:10 crc kubenswrapper[4932]: I1125 10:57:10.014711 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-v4vg2" event={"ID":"4396feb3-09a0-4a90-94f6-6494483a4e94","Type":"ContainerStarted","Data":"2699d4ea78687b32f494007abd402c7ea656ff468af4c882070d4babebc8097e"} Nov 25 10:57:13 crc kubenswrapper[4932]: I1125 10:57:13.044754 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-v4vg2" event={"ID":"4396feb3-09a0-4a90-94f6-6494483a4e94","Type":"ContainerStarted","Data":"e9555a26491554754b4b8c081202272032356300bea55fe2bf4036c183b1bb63"} Nov 25 10:57:13 crc kubenswrapper[4932]: I1125 10:57:13.068906 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-openstack-openstack-cell1-v4vg2" podStartSLOduration=2.276849102 podStartE2EDuration="4.068888081s" podCreationTimestamp="2025-11-25 10:57:09 +0000 UTC" firstStartedPulling="2025-11-25 10:57:09.97763094 +0000 UTC m=+7690.103660503" lastFinishedPulling="2025-11-25 10:57:11.769669919 +0000 UTC m=+7691.895699482" observedRunningTime="2025-11-25 10:57:13.065248206 +0000 UTC m=+7693.191277799" watchObservedRunningTime="2025-11-25 10:57:13.068888081 +0000 UTC m=+7693.194917644" Nov 25 10:57:37 crc kubenswrapper[4932]: I1125 10:57:37.180591 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:57:37 crc kubenswrapper[4932]: I1125 10:57:37.181050 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection 
refused" Nov 25 10:57:56 crc kubenswrapper[4932]: I1125 10:57:56.445430 4932 generic.go:334] "Generic (PLEG): container finished" podID="4396feb3-09a0-4a90-94f6-6494483a4e94" containerID="e9555a26491554754b4b8c081202272032356300bea55fe2bf4036c183b1bb63" exitCode=0 Nov 25 10:57:56 crc kubenswrapper[4932]: I1125 10:57:56.445508 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-v4vg2" event={"ID":"4396feb3-09a0-4a90-94f6-6494483a4e94","Type":"ContainerDied","Data":"e9555a26491554754b4b8c081202272032356300bea55fe2bf4036c183b1bb63"} Nov 25 10:57:57 crc kubenswrapper[4932]: I1125 10:57:57.908049 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.071241 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4396feb3-09a0-4a90-94f6-6494483a4e94-inventory\") pod \"4396feb3-09a0-4a90-94f6-6494483a4e94\" (UID: \"4396feb3-09a0-4a90-94f6-6494483a4e94\") " Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.071365 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gn6lr\" (UniqueName: \"kubernetes.io/projected/4396feb3-09a0-4a90-94f6-6494483a4e94-kube-api-access-gn6lr\") pod \"4396feb3-09a0-4a90-94f6-6494483a4e94\" (UID: \"4396feb3-09a0-4a90-94f6-6494483a4e94\") " Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.071598 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4396feb3-09a0-4a90-94f6-6494483a4e94-ssh-key\") pod \"4396feb3-09a0-4a90-94f6-6494483a4e94\" (UID: \"4396feb3-09a0-4a90-94f6-6494483a4e94\") " Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.078558 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4396feb3-09a0-4a90-94f6-6494483a4e94-kube-api-access-gn6lr" (OuterVolumeSpecName: "kube-api-access-gn6lr") pod "4396feb3-09a0-4a90-94f6-6494483a4e94" (UID: "4396feb3-09a0-4a90-94f6-6494483a4e94"). InnerVolumeSpecName "kube-api-access-gn6lr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.101078 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4396feb3-09a0-4a90-94f6-6494483a4e94-inventory" (OuterVolumeSpecName: "inventory") pod "4396feb3-09a0-4a90-94f6-6494483a4e94" (UID: "4396feb3-09a0-4a90-94f6-6494483a4e94"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.105315 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4396feb3-09a0-4a90-94f6-6494483a4e94-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4396feb3-09a0-4a90-94f6-6494483a4e94" (UID: "4396feb3-09a0-4a90-94f6-6494483a4e94"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.173742 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4396feb3-09a0-4a90-94f6-6494483a4e94-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.173785 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4396feb3-09a0-4a90-94f6-6494483a4e94-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.173805 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gn6lr\" (UniqueName: \"kubernetes.io/projected/4396feb3-09a0-4a90-94f6-6494483a4e94-kube-api-access-gn6lr\") on node \"crc\" DevicePath \"\"" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.465891 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-v4vg2" event={"ID":"4396feb3-09a0-4a90-94f6-6494483a4e94","Type":"ContainerDied","Data":"2699d4ea78687b32f494007abd402c7ea656ff468af4c882070d4babebc8097e"} Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.465930 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2699d4ea78687b32f494007abd402c7ea656ff468af4c882070d4babebc8097e" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.465969 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-v4vg2" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.617913 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-plnq9"] Nov 25 10:57:58 crc kubenswrapper[4932]: E1125 10:57:58.618288 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4396feb3-09a0-4a90-94f6-6494483a4e94" containerName="install-os-openstack-openstack-cell1" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.618308 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4396feb3-09a0-4a90-94f6-6494483a4e94" containerName="install-os-openstack-openstack-cell1" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.618556 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4396feb3-09a0-4a90-94f6-6494483a4e94" containerName="install-os-openstack-openstack-cell1" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.619344 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.624474 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.624757 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.624881 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.624994 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.631183 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-plnq9"] Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.685027 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0bb63e9-064b-4970-b98f-a5e8cf377302-inventory\") pod \"configure-os-openstack-openstack-cell1-plnq9\" (UID: \"a0bb63e9-064b-4970-b98f-a5e8cf377302\") " pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.685596 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0bb63e9-064b-4970-b98f-a5e8cf377302-ssh-key\") pod \"configure-os-openstack-openstack-cell1-plnq9\" (UID: \"a0bb63e9-064b-4970-b98f-a5e8cf377302\") " pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.685955 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cx54\" (UniqueName: \"kubernetes.io/projected/a0bb63e9-064b-4970-b98f-a5e8cf377302-kube-api-access-4cx54\") pod \"configure-os-openstack-openstack-cell1-plnq9\" (UID: \"a0bb63e9-064b-4970-b98f-a5e8cf377302\") " pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.788396 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0bb63e9-064b-4970-b98f-a5e8cf377302-inventory\") pod \"configure-os-openstack-openstack-cell1-plnq9\" (UID: \"a0bb63e9-064b-4970-b98f-a5e8cf377302\") " pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.788464 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0bb63e9-064b-4970-b98f-a5e8cf377302-ssh-key\") pod \"configure-os-openstack-openstack-cell1-plnq9\" (UID: \"a0bb63e9-064b-4970-b98f-a5e8cf377302\") " pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.788532 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cx54\" (UniqueName: \"kubernetes.io/projected/a0bb63e9-064b-4970-b98f-a5e8cf377302-kube-api-access-4cx54\") pod \"configure-os-openstack-openstack-cell1-plnq9\" (UID: \"a0bb63e9-064b-4970-b98f-a5e8cf377302\") " pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:57:58 crc kubenswrapper[4932]: 
I1125 10:57:58.792372 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0bb63e9-064b-4970-b98f-a5e8cf377302-ssh-key\") pod \"configure-os-openstack-openstack-cell1-plnq9\" (UID: \"a0bb63e9-064b-4970-b98f-a5e8cf377302\") " pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.792396 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0bb63e9-064b-4970-b98f-a5e8cf377302-inventory\") pod \"configure-os-openstack-openstack-cell1-plnq9\" (UID: \"a0bb63e9-064b-4970-b98f-a5e8cf377302\") " pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.816618 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cx54\" (UniqueName: \"kubernetes.io/projected/a0bb63e9-064b-4970-b98f-a5e8cf377302-kube-api-access-4cx54\") pod \"configure-os-openstack-openstack-cell1-plnq9\" (UID: \"a0bb63e9-064b-4970-b98f-a5e8cf377302\") " pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:57:58 crc kubenswrapper[4932]: I1125 10:57:58.941235 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:57:59 crc kubenswrapper[4932]: I1125 10:57:59.474246 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-plnq9"] Nov 25 10:58:00 crc kubenswrapper[4932]: I1125 10:58:00.487705 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-plnq9" event={"ID":"a0bb63e9-064b-4970-b98f-a5e8cf377302","Type":"ContainerStarted","Data":"19b73eeff228a87fd36d69148adb384cb7fb7b6449c07e8e9c1f587033a6c506"} Nov 25 10:58:00 crc kubenswrapper[4932]: I1125 10:58:00.488029 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-plnq9" event={"ID":"a0bb63e9-064b-4970-b98f-a5e8cf377302","Type":"ContainerStarted","Data":"b74abc2c2bc90d108b8eaaf90a34e6e250a70bf1f6524bacaa0f33af6c9b59cc"} Nov 25 10:58:00 crc kubenswrapper[4932]: I1125 10:58:00.558743 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-plnq9" podStartSLOduration=2.162354875 podStartE2EDuration="2.558719625s" podCreationTimestamp="2025-11-25 10:57:58 +0000 UTC" firstStartedPulling="2025-11-25 10:57:59.482806031 +0000 UTC m=+7739.608835594" lastFinishedPulling="2025-11-25 10:57:59.879170781 +0000 UTC m=+7740.005200344" observedRunningTime="2025-11-25 10:58:00.55331984 +0000 UTC m=+7740.679349403" watchObservedRunningTime="2025-11-25 10:58:00.558719625 +0000 UTC m=+7740.684749188" Nov 25 10:58:07 crc kubenswrapper[4932]: I1125 10:58:07.181488 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:58:07 crc kubenswrapper[4932]: I1125 10:58:07.182171 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.374261 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lt9c4"] Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.377470 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.390246 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lt9c4"] Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.560599 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvlt8\" (UniqueName: \"kubernetes.io/projected/5f1fb940-d756-4280-bfe8-f9d26095f04f-kube-api-access-dvlt8\") pod \"redhat-marketplace-lt9c4\" (UID: \"5f1fb940-d756-4280-bfe8-f9d26095f04f\") " pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.560713 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f1fb940-d756-4280-bfe8-f9d26095f04f-utilities\") pod \"redhat-marketplace-lt9c4\" (UID: \"5f1fb940-d756-4280-bfe8-f9d26095f04f\") " pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.561441 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f1fb940-d756-4280-bfe8-f9d26095f04f-catalog-content\") pod \"redhat-marketplace-lt9c4\" (UID: \"5f1fb940-d756-4280-bfe8-f9d26095f04f\") " pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.663761 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f1fb940-d756-4280-bfe8-f9d26095f04f-utilities\") pod \"redhat-marketplace-lt9c4\" (UID: \"5f1fb940-d756-4280-bfe8-f9d26095f04f\") " pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.663869 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f1fb940-d756-4280-bfe8-f9d26095f04f-catalog-content\") pod \"redhat-marketplace-lt9c4\" (UID: \"5f1fb940-d756-4280-bfe8-f9d26095f04f\") " pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.664248 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f1fb940-d756-4280-bfe8-f9d26095f04f-utilities\") pod \"redhat-marketplace-lt9c4\" (UID: \"5f1fb940-d756-4280-bfe8-f9d26095f04f\") " pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.664368 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvlt8\" (UniqueName: \"kubernetes.io/projected/5f1fb940-d756-4280-bfe8-f9d26095f04f-kube-api-access-dvlt8\") pod \"redhat-marketplace-lt9c4\" (UID: \"5f1fb940-d756-4280-bfe8-f9d26095f04f\") " pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.664654 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f1fb940-d756-4280-bfe8-f9d26095f04f-catalog-content\") pod \"redhat-marketplace-lt9c4\" (UID: \"5f1fb940-d756-4280-bfe8-f9d26095f04f\") " pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.691028 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvlt8\" (UniqueName: \"kubernetes.io/projected/5f1fb940-d756-4280-bfe8-f9d26095f04f-kube-api-access-dvlt8\") pod \"redhat-marketplace-lt9c4\" (UID: \"5f1fb940-d756-4280-bfe8-f9d26095f04f\") " pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:31 crc kubenswrapper[4932]: I1125 10:58:31.701026 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:32 crc kubenswrapper[4932]: I1125 10:58:32.195429 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lt9c4"] Nov 25 10:58:32 crc kubenswrapper[4932]: I1125 10:58:32.856950 4932 generic.go:334] "Generic (PLEG): container finished" podID="5f1fb940-d756-4280-bfe8-f9d26095f04f" containerID="834116298a78ca3045dd4108d77964d8d53f60c2820248d974d9e19ec9811917" exitCode=0 Nov 25 10:58:32 crc kubenswrapper[4932]: I1125 10:58:32.857059 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lt9c4" event={"ID":"5f1fb940-d756-4280-bfe8-f9d26095f04f","Type":"ContainerDied","Data":"834116298a78ca3045dd4108d77964d8d53f60c2820248d974d9e19ec9811917"} Nov 25 10:58:32 crc kubenswrapper[4932]: I1125 10:58:32.857397 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lt9c4" event={"ID":"5f1fb940-d756-4280-bfe8-f9d26095f04f","Type":"ContainerStarted","Data":"b1fce813da20cd0b57b7a5a3276ae7a92404741eb35b6eab8ed7501f3e13b9de"} Nov 25 10:58:33 crc kubenswrapper[4932]: I1125 10:58:33.869023 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lt9c4" event={"ID":"5f1fb940-d756-4280-bfe8-f9d26095f04f","Type":"ContainerStarted","Data":"2bf0dfb17f1b03fbf269c9d16b53b7ead6fa619c231152ad476c06d8e8fc35ed"} Nov 25 10:58:34 crc kubenswrapper[4932]: I1125 10:58:34.880272 4932 generic.go:334] "Generic (PLEG): container finished" podID="5f1fb940-d756-4280-bfe8-f9d26095f04f" containerID="2bf0dfb17f1b03fbf269c9d16b53b7ead6fa619c231152ad476c06d8e8fc35ed" exitCode=0 Nov 25 10:58:34 crc kubenswrapper[4932]: I1125 10:58:34.880376 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lt9c4" event={"ID":"5f1fb940-d756-4280-bfe8-f9d26095f04f","Type":"ContainerDied","Data":"2bf0dfb17f1b03fbf269c9d16b53b7ead6fa619c231152ad476c06d8e8fc35ed"} Nov 25 10:58:35 crc kubenswrapper[4932]: I1125 10:58:35.894288 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lt9c4" event={"ID":"5f1fb940-d756-4280-bfe8-f9d26095f04f","Type":"ContainerStarted","Data":"f0d9fea2c3af74c5952fde5e75a64438bc500847be40049718c494226c535048"} Nov 25 10:58:35 crc kubenswrapper[4932]: I1125 10:58:35.923311 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lt9c4" podStartSLOduration=2.356183413 podStartE2EDuration="4.923289057s" podCreationTimestamp="2025-11-25 10:58:31 +0000 UTC" firstStartedPulling="2025-11-25 10:58:32.859458764 +0000 UTC m=+7772.985488327" 
lastFinishedPulling="2025-11-25 10:58:35.426564408 +0000 UTC m=+7775.552593971" observedRunningTime="2025-11-25 10:58:35.91502122 +0000 UTC m=+7776.041050823" watchObservedRunningTime="2025-11-25 10:58:35.923289057 +0000 UTC m=+7776.049318630" Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.579127 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6bjwb"] Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.595606 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6bjwb"] Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.595752 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.790365 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-catalog-content\") pod \"certified-operators-6bjwb\" (UID: \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\") " pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.791771 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-utilities\") pod \"certified-operators-6bjwb\" (UID: \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\") " pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.791828 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm9f9\" (UniqueName: \"kubernetes.io/projected/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-kube-api-access-xm9f9\") pod \"certified-operators-6bjwb\" (UID: \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\") " pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.894030 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-catalog-content\") pod \"certified-operators-6bjwb\" (UID: \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\") " pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.894136 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-utilities\") pod \"certified-operators-6bjwb\" (UID: \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\") " pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.894170 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm9f9\" (UniqueName: \"kubernetes.io/projected/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-kube-api-access-xm9f9\") pod \"certified-operators-6bjwb\" (UID: \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\") " pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.895181 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-utilities\") pod \"certified-operators-6bjwb\" (UID: \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\") " 
pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.895449 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-catalog-content\") pod \"certified-operators-6bjwb\" (UID: \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\") " pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.931685 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm9f9\" (UniqueName: \"kubernetes.io/projected/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-kube-api-access-xm9f9\") pod \"certified-operators-6bjwb\" (UID: \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\") " pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:36 crc kubenswrapper[4932]: I1125 10:58:36.937491 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:37 crc kubenswrapper[4932]: I1125 10:58:37.182595 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 10:58:37 crc kubenswrapper[4932]: I1125 10:58:37.182965 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 10:58:37 crc kubenswrapper[4932]: I1125 10:58:37.183015 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 10:58:37 crc kubenswrapper[4932]: I1125 10:58:37.183922 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 10:58:37 crc kubenswrapper[4932]: I1125 10:58:37.183986 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" gracePeriod=600 Nov 25 10:58:37 crc kubenswrapper[4932]: E1125 10:58:37.328989 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:58:37 crc kubenswrapper[4932]: I1125 10:58:37.514920 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6bjwb"] Nov 25 10:58:37 crc kubenswrapper[4932]: W1125 10:58:37.520402 4932 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4018bc08_5a6b_44f6_b420_d6b4fce4f0a0.slice/crio-88040b4bac5db138aa52fe4219bd9e30d5c7dc299811dedaa6d6024486b4e3fb WatchSource:0}: Error finding container 88040b4bac5db138aa52fe4219bd9e30d5c7dc299811dedaa6d6024486b4e3fb: Status 404 returned error can't find the container with id 88040b4bac5db138aa52fe4219bd9e30d5c7dc299811dedaa6d6024486b4e3fb Nov 25 10:58:37 crc kubenswrapper[4932]: I1125 10:58:37.914007 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" exitCode=0 Nov 25 10:58:37 crc kubenswrapper[4932]: I1125 10:58:37.914099 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b"} Nov 25 10:58:37 crc kubenswrapper[4932]: I1125 10:58:37.914394 4932 scope.go:117] "RemoveContainer" containerID="f25878df9a1b0709ed13ec05916688940d18560b65912b6aa892c7e65177215a" Nov 25 10:58:37 crc kubenswrapper[4932]: I1125 10:58:37.915211 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 10:58:37 crc kubenswrapper[4932]: E1125 10:58:37.915540 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:58:37 crc kubenswrapper[4932]: I1125 10:58:37.917383 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bjwb" event={"ID":"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0","Type":"ContainerStarted","Data":"88040b4bac5db138aa52fe4219bd9e30d5c7dc299811dedaa6d6024486b4e3fb"} Nov 25 10:58:38 crc kubenswrapper[4932]: I1125 10:58:38.931519 4932 generic.go:334] "Generic (PLEG): container finished" podID="4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" containerID="2301af2d7d851ec7a0f9c84538c2c4084023ae00cd6e5d2780dcee21d4f1778b" exitCode=0 Nov 25 10:58:38 crc kubenswrapper[4932]: I1125 10:58:38.931844 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bjwb" event={"ID":"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0","Type":"ContainerDied","Data":"2301af2d7d851ec7a0f9c84538c2c4084023ae00cd6e5d2780dcee21d4f1778b"} Nov 25 10:58:40 crc kubenswrapper[4932]: I1125 10:58:40.959163 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bjwb" event={"ID":"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0","Type":"ContainerStarted","Data":"40c6667d1693f8b12bebb623fe6c8a6996a8eea1eb780accb2393a40456ad3b5"} Nov 25 10:58:41 crc kubenswrapper[4932]: I1125 10:58:41.703720 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:41 crc kubenswrapper[4932]: I1125 10:58:41.704002 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:41 crc 
kubenswrapper[4932]: I1125 10:58:41.761785 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:42 crc kubenswrapper[4932]: I1125 10:58:42.031774 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:42 crc kubenswrapper[4932]: I1125 10:58:42.984434 4932 generic.go:334] "Generic (PLEG): container finished" podID="a0bb63e9-064b-4970-b98f-a5e8cf377302" containerID="19b73eeff228a87fd36d69148adb384cb7fb7b6449c07e8e9c1f587033a6c506" exitCode=0 Nov 25 10:58:42 crc kubenswrapper[4932]: I1125 10:58:42.984513 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-plnq9" event={"ID":"a0bb63e9-064b-4970-b98f-a5e8cf377302","Type":"ContainerDied","Data":"19b73eeff228a87fd36d69148adb384cb7fb7b6449c07e8e9c1f587033a6c506"} Nov 25 10:58:43 crc kubenswrapper[4932]: I1125 10:58:43.994813 4932 generic.go:334] "Generic (PLEG): container finished" podID="4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" containerID="40c6667d1693f8b12bebb623fe6c8a6996a8eea1eb780accb2393a40456ad3b5" exitCode=0 Nov 25 10:58:43 crc kubenswrapper[4932]: I1125 10:58:43.996241 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bjwb" event={"ID":"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0","Type":"ContainerDied","Data":"40c6667d1693f8b12bebb623fe6c8a6996a8eea1eb780accb2393a40456ad3b5"} Nov 25 10:58:44 crc kubenswrapper[4932]: I1125 10:58:44.430664 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:58:44 crc kubenswrapper[4932]: I1125 10:58:44.576783 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0bb63e9-064b-4970-b98f-a5e8cf377302-inventory\") pod \"a0bb63e9-064b-4970-b98f-a5e8cf377302\" (UID: \"a0bb63e9-064b-4970-b98f-a5e8cf377302\") " Nov 25 10:58:44 crc kubenswrapper[4932]: I1125 10:58:44.576992 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cx54\" (UniqueName: \"kubernetes.io/projected/a0bb63e9-064b-4970-b98f-a5e8cf377302-kube-api-access-4cx54\") pod \"a0bb63e9-064b-4970-b98f-a5e8cf377302\" (UID: \"a0bb63e9-064b-4970-b98f-a5e8cf377302\") " Nov 25 10:58:44 crc kubenswrapper[4932]: I1125 10:58:44.577690 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0bb63e9-064b-4970-b98f-a5e8cf377302-ssh-key\") pod \"a0bb63e9-064b-4970-b98f-a5e8cf377302\" (UID: \"a0bb63e9-064b-4970-b98f-a5e8cf377302\") " Nov 25 10:58:44 crc kubenswrapper[4932]: I1125 10:58:44.583636 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0bb63e9-064b-4970-b98f-a5e8cf377302-kube-api-access-4cx54" (OuterVolumeSpecName: "kube-api-access-4cx54") pod "a0bb63e9-064b-4970-b98f-a5e8cf377302" (UID: "a0bb63e9-064b-4970-b98f-a5e8cf377302"). InnerVolumeSpecName "kube-api-access-4cx54". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:58:44 crc kubenswrapper[4932]: I1125 10:58:44.634441 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0bb63e9-064b-4970-b98f-a5e8cf377302-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a0bb63e9-064b-4970-b98f-a5e8cf377302" (UID: "a0bb63e9-064b-4970-b98f-a5e8cf377302"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:58:44 crc kubenswrapper[4932]: I1125 10:58:44.637477 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0bb63e9-064b-4970-b98f-a5e8cf377302-inventory" (OuterVolumeSpecName: "inventory") pod "a0bb63e9-064b-4970-b98f-a5e8cf377302" (UID: "a0bb63e9-064b-4970-b98f-a5e8cf377302"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:58:44 crc kubenswrapper[4932]: I1125 10:58:44.680823 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a0bb63e9-064b-4970-b98f-a5e8cf377302-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:58:44 crc kubenswrapper[4932]: I1125 10:58:44.680860 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a0bb63e9-064b-4970-b98f-a5e8cf377302-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:58:44 crc kubenswrapper[4932]: I1125 10:58:44.680871 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cx54\" (UniqueName: \"kubernetes.io/projected/a0bb63e9-064b-4970-b98f-a5e8cf377302-kube-api-access-4cx54\") on node \"crc\" DevicePath \"\"" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.011132 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-plnq9" event={"ID":"a0bb63e9-064b-4970-b98f-a5e8cf377302","Type":"ContainerDied","Data":"b74abc2c2bc90d108b8eaaf90a34e6e250a70bf1f6524bacaa0f33af6c9b59cc"} Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.011594 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b74abc2c2bc90d108b8eaaf90a34e6e250a70bf1f6524bacaa0f33af6c9b59cc" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.011698 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-plnq9" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.096593 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-openstack-mkz72"] Nov 25 10:58:45 crc kubenswrapper[4932]: E1125 10:58:45.097114 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0bb63e9-064b-4970-b98f-a5e8cf377302" containerName="configure-os-openstack-openstack-cell1" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.097134 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0bb63e9-064b-4970-b98f-a5e8cf377302" containerName="configure-os-openstack-openstack-cell1" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.097394 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0bb63e9-064b-4970-b98f-a5e8cf377302" containerName="configure-os-openstack-openstack-cell1" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.098296 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.101282 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.102525 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.102650 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.102669 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.106770 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-mkz72"] Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.292367 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnshg\" (UniqueName: \"kubernetes.io/projected/85927115-0a8a-41bb-a29d-71d6b85311aa-kube-api-access-rnshg\") pod \"ssh-known-hosts-openstack-mkz72\" (UID: \"85927115-0a8a-41bb-a29d-71d6b85311aa\") " pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.292507 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/85927115-0a8a-41bb-a29d-71d6b85311aa-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-mkz72\" (UID: \"85927115-0a8a-41bb-a29d-71d6b85311aa\") " pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.292539 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/85927115-0a8a-41bb-a29d-71d6b85311aa-inventory-0\") pod \"ssh-known-hosts-openstack-mkz72\" (UID: \"85927115-0a8a-41bb-a29d-71d6b85311aa\") " pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.394739 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnshg\" (UniqueName: \"kubernetes.io/projected/85927115-0a8a-41bb-a29d-71d6b85311aa-kube-api-access-rnshg\") pod \"ssh-known-hosts-openstack-mkz72\" (UID: \"85927115-0a8a-41bb-a29d-71d6b85311aa\") " pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.394872 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/85927115-0a8a-41bb-a29d-71d6b85311aa-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-mkz72\" (UID: \"85927115-0a8a-41bb-a29d-71d6b85311aa\") " pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.394906 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/85927115-0a8a-41bb-a29d-71d6b85311aa-inventory-0\") pod \"ssh-known-hosts-openstack-mkz72\" (UID: \"85927115-0a8a-41bb-a29d-71d6b85311aa\") " pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.400467 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" 
(UniqueName: \"kubernetes.io/secret/85927115-0a8a-41bb-a29d-71d6b85311aa-inventory-0\") pod \"ssh-known-hosts-openstack-mkz72\" (UID: \"85927115-0a8a-41bb-a29d-71d6b85311aa\") " pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.400914 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/85927115-0a8a-41bb-a29d-71d6b85311aa-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-mkz72\" (UID: \"85927115-0a8a-41bb-a29d-71d6b85311aa\") " pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.430633 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnshg\" (UniqueName: \"kubernetes.io/projected/85927115-0a8a-41bb-a29d-71d6b85311aa-kube-api-access-rnshg\") pod \"ssh-known-hosts-openstack-mkz72\" (UID: \"85927115-0a8a-41bb-a29d-71d6b85311aa\") " pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:45 crc kubenswrapper[4932]: I1125 10:58:45.435492 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:46 crc kubenswrapper[4932]: I1125 10:58:46.028686 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bjwb" event={"ID":"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0","Type":"ContainerStarted","Data":"48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097"} Nov 25 10:58:46 crc kubenswrapper[4932]: I1125 10:58:46.053772 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6bjwb" podStartSLOduration=4.320442721 podStartE2EDuration="10.053748293s" podCreationTimestamp="2025-11-25 10:58:36 +0000 UTC" firstStartedPulling="2025-11-25 10:58:38.93306034 +0000 UTC m=+7779.059089903" lastFinishedPulling="2025-11-25 10:58:44.666365912 +0000 UTC m=+7784.792395475" observedRunningTime="2025-11-25 10:58:46.047202635 +0000 UTC m=+7786.173232208" watchObservedRunningTime="2025-11-25 10:58:46.053748293 +0000 UTC m=+7786.179777856" Nov 25 10:58:46 crc kubenswrapper[4932]: I1125 10:58:46.324500 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-mkz72"] Nov 25 10:58:46 crc kubenswrapper[4932]: W1125 10:58:46.330803 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod85927115_0a8a_41bb_a29d_71d6b85311aa.slice/crio-d5598c938659c0388fcdef82d9fee74ae3dc3d93cb27107296ab15f9b1835dfa WatchSource:0}: Error finding container d5598c938659c0388fcdef82d9fee74ae3dc3d93cb27107296ab15f9b1835dfa: Status 404 returned error can't find the container with id d5598c938659c0388fcdef82d9fee74ae3dc3d93cb27107296ab15f9b1835dfa Nov 25 10:58:46 crc kubenswrapper[4932]: I1125 10:58:46.580517 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lt9c4"] Nov 25 10:58:46 crc kubenswrapper[4932]: I1125 10:58:46.580872 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lt9c4" podUID="5f1fb940-d756-4280-bfe8-f9d26095f04f" containerName="registry-server" containerID="cri-o://f0d9fea2c3af74c5952fde5e75a64438bc500847be40049718c494226c535048" gracePeriod=2 Nov 25 10:58:46 crc kubenswrapper[4932]: I1125 10:58:46.938599 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:46 crc kubenswrapper[4932]: I1125 10:58:46.938931 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:46 crc kubenswrapper[4932]: I1125 10:58:46.989495 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:47 crc kubenswrapper[4932]: I1125 10:58:47.038957 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-mkz72" event={"ID":"85927115-0a8a-41bb-a29d-71d6b85311aa","Type":"ContainerStarted","Data":"d5598c938659c0388fcdef82d9fee74ae3dc3d93cb27107296ab15f9b1835dfa"} Nov 25 10:58:48 crc kubenswrapper[4932]: I1125 10:58:48.059566 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lt9c4" event={"ID":"5f1fb940-d756-4280-bfe8-f9d26095f04f","Type":"ContainerDied","Data":"f0d9fea2c3af74c5952fde5e75a64438bc500847be40049718c494226c535048"} Nov 25 10:58:48 crc kubenswrapper[4932]: I1125 10:58:48.059494 4932 generic.go:334] "Generic (PLEG): container finished" podID="5f1fb940-d756-4280-bfe8-f9d26095f04f" containerID="f0d9fea2c3af74c5952fde5e75a64438bc500847be40049718c494226c535048" exitCode=0 Nov 25 10:58:48 crc kubenswrapper[4932]: I1125 10:58:48.195405 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:48 crc kubenswrapper[4932]: I1125 10:58:48.268626 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f1fb940-d756-4280-bfe8-f9d26095f04f-utilities\") pod \"5f1fb940-d756-4280-bfe8-f9d26095f04f\" (UID: \"5f1fb940-d756-4280-bfe8-f9d26095f04f\") " Nov 25 10:58:48 crc kubenswrapper[4932]: I1125 10:58:48.268814 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvlt8\" (UniqueName: \"kubernetes.io/projected/5f1fb940-d756-4280-bfe8-f9d26095f04f-kube-api-access-dvlt8\") pod \"5f1fb940-d756-4280-bfe8-f9d26095f04f\" (UID: \"5f1fb940-d756-4280-bfe8-f9d26095f04f\") " Nov 25 10:58:48 crc kubenswrapper[4932]: I1125 10:58:48.268841 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f1fb940-d756-4280-bfe8-f9d26095f04f-catalog-content\") pod \"5f1fb940-d756-4280-bfe8-f9d26095f04f\" (UID: \"5f1fb940-d756-4280-bfe8-f9d26095f04f\") " Nov 25 10:58:48 crc kubenswrapper[4932]: I1125 10:58:48.271905 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f1fb940-d756-4280-bfe8-f9d26095f04f-utilities" (OuterVolumeSpecName: "utilities") pod "5f1fb940-d756-4280-bfe8-f9d26095f04f" (UID: "5f1fb940-d756-4280-bfe8-f9d26095f04f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:58:48 crc kubenswrapper[4932]: I1125 10:58:48.275528 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f1fb940-d756-4280-bfe8-f9d26095f04f-kube-api-access-dvlt8" (OuterVolumeSpecName: "kube-api-access-dvlt8") pod "5f1fb940-d756-4280-bfe8-f9d26095f04f" (UID: "5f1fb940-d756-4280-bfe8-f9d26095f04f"). InnerVolumeSpecName "kube-api-access-dvlt8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:58:48 crc kubenswrapper[4932]: I1125 10:58:48.288912 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f1fb940-d756-4280-bfe8-f9d26095f04f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5f1fb940-d756-4280-bfe8-f9d26095f04f" (UID: "5f1fb940-d756-4280-bfe8-f9d26095f04f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:58:48 crc kubenswrapper[4932]: I1125 10:58:48.371916 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvlt8\" (UniqueName: \"kubernetes.io/projected/5f1fb940-d756-4280-bfe8-f9d26095f04f-kube-api-access-dvlt8\") on node \"crc\" DevicePath \"\"" Nov 25 10:58:48 crc kubenswrapper[4932]: I1125 10:58:48.371964 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f1fb940-d756-4280-bfe8-f9d26095f04f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:58:48 crc kubenswrapper[4932]: I1125 10:58:48.371976 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f1fb940-d756-4280-bfe8-f9d26095f04f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:58:49 crc kubenswrapper[4932]: I1125 10:58:49.070704 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-mkz72" event={"ID":"85927115-0a8a-41bb-a29d-71d6b85311aa","Type":"ContainerStarted","Data":"05e728d92b8de63cc01623a412eb7f715f0d8027c06e618393bf35aebbc6dd27"} Nov 25 10:58:49 crc kubenswrapper[4932]: I1125 10:58:49.075350 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lt9c4" event={"ID":"5f1fb940-d756-4280-bfe8-f9d26095f04f","Type":"ContainerDied","Data":"b1fce813da20cd0b57b7a5a3276ae7a92404741eb35b6eab8ed7501f3e13b9de"} Nov 25 10:58:49 crc kubenswrapper[4932]: I1125 10:58:49.075425 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lt9c4" Nov 25 10:58:49 crc kubenswrapper[4932]: I1125 10:58:49.075448 4932 scope.go:117] "RemoveContainer" containerID="f0d9fea2c3af74c5952fde5e75a64438bc500847be40049718c494226c535048" Nov 25 10:58:49 crc kubenswrapper[4932]: I1125 10:58:49.090638 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-openstack-mkz72" podStartSLOduration=2.486230577 podStartE2EDuration="4.090623012s" podCreationTimestamp="2025-11-25 10:58:45 +0000 UTC" firstStartedPulling="2025-11-25 10:58:46.333272902 +0000 UTC m=+7786.459302475" lastFinishedPulling="2025-11-25 10:58:47.937665347 +0000 UTC m=+7788.063694910" observedRunningTime="2025-11-25 10:58:49.089949943 +0000 UTC m=+7789.215979506" watchObservedRunningTime="2025-11-25 10:58:49.090623012 +0000 UTC m=+7789.216652575" Nov 25 10:58:49 crc kubenswrapper[4932]: I1125 10:58:49.114507 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lt9c4"] Nov 25 10:58:49 crc kubenswrapper[4932]: I1125 10:58:49.118487 4932 scope.go:117] "RemoveContainer" containerID="2bf0dfb17f1b03fbf269c9d16b53b7ead6fa619c231152ad476c06d8e8fc35ed" Nov 25 10:58:49 crc kubenswrapper[4932]: I1125 10:58:49.123323 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lt9c4"] Nov 25 10:58:49 crc kubenswrapper[4932]: I1125 10:58:49.139067 4932 scope.go:117] "RemoveContainer" containerID="834116298a78ca3045dd4108d77964d8d53f60c2820248d974d9e19ec9811917" Nov 25 10:58:50 crc kubenswrapper[4932]: I1125 10:58:50.614044 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 10:58:50 crc kubenswrapper[4932]: E1125 10:58:50.614633 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:58:50 crc kubenswrapper[4932]: I1125 10:58:50.617162 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f1fb940-d756-4280-bfe8-f9d26095f04f" path="/var/lib/kubelet/pods/5f1fb940-d756-4280-bfe8-f9d26095f04f/volumes" Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.001964 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.047263 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6bjwb"] Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.148656 4932 generic.go:334] "Generic (PLEG): container finished" podID="85927115-0a8a-41bb-a29d-71d6b85311aa" containerID="05e728d92b8de63cc01623a412eb7f715f0d8027c06e618393bf35aebbc6dd27" exitCode=0 Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.148713 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-mkz72" event={"ID":"85927115-0a8a-41bb-a29d-71d6b85311aa","Type":"ContainerDied","Data":"05e728d92b8de63cc01623a412eb7f715f0d8027c06e618393bf35aebbc6dd27"} Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.148843 4932 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-marketplace/certified-operators-6bjwb" podUID="4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" containerName="registry-server" containerID="cri-o://48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097" gracePeriod=2 Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.617577 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.672277 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-catalog-content\") pod \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\" (UID: \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\") " Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.672374 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-utilities\") pod \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\" (UID: \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\") " Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.672401 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xm9f9\" (UniqueName: \"kubernetes.io/projected/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-kube-api-access-xm9f9\") pod \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\" (UID: \"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0\") " Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.673715 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-utilities" (OuterVolumeSpecName: "utilities") pod "4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" (UID: "4018bc08-5a6b-44f6-b420-d6b4fce4f0a0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.674286 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.686513 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-kube-api-access-xm9f9" (OuterVolumeSpecName: "kube-api-access-xm9f9") pod "4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" (UID: "4018bc08-5a6b-44f6-b420-d6b4fce4f0a0"). InnerVolumeSpecName "kube-api-access-xm9f9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.723916 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" (UID: "4018bc08-5a6b-44f6-b420-d6b4fce4f0a0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.776667 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 10:58:57 crc kubenswrapper[4932]: I1125 10:58:57.776723 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xm9f9\" (UniqueName: \"kubernetes.io/projected/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0-kube-api-access-xm9f9\") on node \"crc\" DevicePath \"\"" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.159831 4932 generic.go:334] "Generic (PLEG): container finished" podID="4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" containerID="48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097" exitCode=0 Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.159912 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6bjwb" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.159921 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bjwb" event={"ID":"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0","Type":"ContainerDied","Data":"48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097"} Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.161483 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6bjwb" event={"ID":"4018bc08-5a6b-44f6-b420-d6b4fce4f0a0","Type":"ContainerDied","Data":"88040b4bac5db138aa52fe4219bd9e30d5c7dc299811dedaa6d6024486b4e3fb"} Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.161509 4932 scope.go:117] "RemoveContainer" containerID="48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.185435 4932 scope.go:117] "RemoveContainer" containerID="40c6667d1693f8b12bebb623fe6c8a6996a8eea1eb780accb2393a40456ad3b5" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.203446 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6bjwb"] Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.219750 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6bjwb"] Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.226784 4932 scope.go:117] "RemoveContainer" containerID="2301af2d7d851ec7a0f9c84538c2c4084023ae00cd6e5d2780dcee21d4f1778b" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.267775 4932 scope.go:117] "RemoveContainer" containerID="48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097" Nov 25 10:58:58 crc kubenswrapper[4932]: E1125 10:58:58.268364 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097\": container with ID starting with 48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097 not found: ID does not exist" containerID="48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.268393 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097"} err="failed to get container status 
\"48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097\": rpc error: code = NotFound desc = could not find container \"48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097\": container with ID starting with 48a5b51e365e0148fe3a8b0be6e7dfe866df09fdc13d1ea26ee18e426fffc097 not found: ID does not exist" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.268413 4932 scope.go:117] "RemoveContainer" containerID="40c6667d1693f8b12bebb623fe6c8a6996a8eea1eb780accb2393a40456ad3b5" Nov 25 10:58:58 crc kubenswrapper[4932]: E1125 10:58:58.268649 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40c6667d1693f8b12bebb623fe6c8a6996a8eea1eb780accb2393a40456ad3b5\": container with ID starting with 40c6667d1693f8b12bebb623fe6c8a6996a8eea1eb780accb2393a40456ad3b5 not found: ID does not exist" containerID="40c6667d1693f8b12bebb623fe6c8a6996a8eea1eb780accb2393a40456ad3b5" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.268671 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40c6667d1693f8b12bebb623fe6c8a6996a8eea1eb780accb2393a40456ad3b5"} err="failed to get container status \"40c6667d1693f8b12bebb623fe6c8a6996a8eea1eb780accb2393a40456ad3b5\": rpc error: code = NotFound desc = could not find container \"40c6667d1693f8b12bebb623fe6c8a6996a8eea1eb780accb2393a40456ad3b5\": container with ID starting with 40c6667d1693f8b12bebb623fe6c8a6996a8eea1eb780accb2393a40456ad3b5 not found: ID does not exist" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.268686 4932 scope.go:117] "RemoveContainer" containerID="2301af2d7d851ec7a0f9c84538c2c4084023ae00cd6e5d2780dcee21d4f1778b" Nov 25 10:58:58 crc kubenswrapper[4932]: E1125 10:58:58.269295 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2301af2d7d851ec7a0f9c84538c2c4084023ae00cd6e5d2780dcee21d4f1778b\": container with ID starting with 2301af2d7d851ec7a0f9c84538c2c4084023ae00cd6e5d2780dcee21d4f1778b not found: ID does not exist" containerID="2301af2d7d851ec7a0f9c84538c2c4084023ae00cd6e5d2780dcee21d4f1778b" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.269337 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2301af2d7d851ec7a0f9c84538c2c4084023ae00cd6e5d2780dcee21d4f1778b"} err="failed to get container status \"2301af2d7d851ec7a0f9c84538c2c4084023ae00cd6e5d2780dcee21d4f1778b\": rpc error: code = NotFound desc = could not find container \"2301af2d7d851ec7a0f9c84538c2c4084023ae00cd6e5d2780dcee21d4f1778b\": container with ID starting with 2301af2d7d851ec7a0f9c84538c2c4084023ae00cd6e5d2780dcee21d4f1778b not found: ID does not exist" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.603446 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.620297 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" path="/var/lib/kubelet/pods/4018bc08-5a6b-44f6-b420-d6b4fce4f0a0/volumes" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.699183 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/85927115-0a8a-41bb-a29d-71d6b85311aa-ssh-key-openstack-cell1\") pod \"85927115-0a8a-41bb-a29d-71d6b85311aa\" (UID: \"85927115-0a8a-41bb-a29d-71d6b85311aa\") " Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.700014 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/85927115-0a8a-41bb-a29d-71d6b85311aa-inventory-0\") pod \"85927115-0a8a-41bb-a29d-71d6b85311aa\" (UID: \"85927115-0a8a-41bb-a29d-71d6b85311aa\") " Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.700089 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnshg\" (UniqueName: \"kubernetes.io/projected/85927115-0a8a-41bb-a29d-71d6b85311aa-kube-api-access-rnshg\") pod \"85927115-0a8a-41bb-a29d-71d6b85311aa\" (UID: \"85927115-0a8a-41bb-a29d-71d6b85311aa\") " Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.705536 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85927115-0a8a-41bb-a29d-71d6b85311aa-kube-api-access-rnshg" (OuterVolumeSpecName: "kube-api-access-rnshg") pod "85927115-0a8a-41bb-a29d-71d6b85311aa" (UID: "85927115-0a8a-41bb-a29d-71d6b85311aa"). InnerVolumeSpecName "kube-api-access-rnshg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.728880 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85927115-0a8a-41bb-a29d-71d6b85311aa-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "85927115-0a8a-41bb-a29d-71d6b85311aa" (UID: "85927115-0a8a-41bb-a29d-71d6b85311aa"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.734230 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85927115-0a8a-41bb-a29d-71d6b85311aa-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "85927115-0a8a-41bb-a29d-71d6b85311aa" (UID: "85927115-0a8a-41bb-a29d-71d6b85311aa"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.803071 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/85927115-0a8a-41bb-a29d-71d6b85311aa-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.803106 4932 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/85927115-0a8a-41bb-a29d-71d6b85311aa-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 25 10:58:58 crc kubenswrapper[4932]: I1125 10:58:58.803117 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnshg\" (UniqueName: \"kubernetes.io/projected/85927115-0a8a-41bb-a29d-71d6b85311aa-kube-api-access-rnshg\") on node \"crc\" DevicePath \"\"" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.177132 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-mkz72" event={"ID":"85927115-0a8a-41bb-a29d-71d6b85311aa","Type":"ContainerDied","Data":"d5598c938659c0388fcdef82d9fee74ae3dc3d93cb27107296ab15f9b1835dfa"} Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.177240 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-mkz72" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.177310 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d5598c938659c0388fcdef82d9fee74ae3dc3d93cb27107296ab15f9b1835dfa" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.270536 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-openstack-openstack-cell1-vqq2m"] Nov 25 10:58:59 crc kubenswrapper[4932]: E1125 10:58:59.271926 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" containerName="registry-server" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.271954 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" containerName="registry-server" Nov 25 10:58:59 crc kubenswrapper[4932]: E1125 10:58:59.271977 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f1fb940-d756-4280-bfe8-f9d26095f04f" containerName="registry-server" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.271982 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f1fb940-d756-4280-bfe8-f9d26095f04f" containerName="registry-server" Nov 25 10:58:59 crc kubenswrapper[4932]: E1125 10:58:59.272000 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f1fb940-d756-4280-bfe8-f9d26095f04f" containerName="extract-utilities" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.272007 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f1fb940-d756-4280-bfe8-f9d26095f04f" containerName="extract-utilities" Nov 25 10:58:59 crc kubenswrapper[4932]: E1125 10:58:59.272019 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" containerName="extract-utilities" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.272025 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" containerName="extract-utilities" Nov 25 10:58:59 crc kubenswrapper[4932]: E1125 10:58:59.272037 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" 
containerName="extract-content" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.272044 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" containerName="extract-content" Nov 25 10:58:59 crc kubenswrapper[4932]: E1125 10:58:59.272062 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85927115-0a8a-41bb-a29d-71d6b85311aa" containerName="ssh-known-hosts-openstack" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.272068 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="85927115-0a8a-41bb-a29d-71d6b85311aa" containerName="ssh-known-hosts-openstack" Nov 25 10:58:59 crc kubenswrapper[4932]: E1125 10:58:59.272090 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f1fb940-d756-4280-bfe8-f9d26095f04f" containerName="extract-content" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.272095 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f1fb940-d756-4280-bfe8-f9d26095f04f" containerName="extract-content" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.272430 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f1fb940-d756-4280-bfe8-f9d26095f04f" containerName="registry-server" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.272445 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4018bc08-5a6b-44f6-b420-d6b4fce4f0a0" containerName="registry-server" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.272456 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="85927115-0a8a-41bb-a29d-71d6b85311aa" containerName="ssh-known-hosts-openstack" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.273604 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.276391 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.276507 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.279481 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.285309 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-vqq2m"] Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.294580 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.313656 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8wcn\" (UniqueName: \"kubernetes.io/projected/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-kube-api-access-x8wcn\") pod \"run-os-openstack-openstack-cell1-vqq2m\" (UID: \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\") " pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.313760 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-ssh-key\") pod \"run-os-openstack-openstack-cell1-vqq2m\" (UID: \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\") " 
pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.313793 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-inventory\") pod \"run-os-openstack-openstack-cell1-vqq2m\" (UID: \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\") " pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.416209 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8wcn\" (UniqueName: \"kubernetes.io/projected/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-kube-api-access-x8wcn\") pod \"run-os-openstack-openstack-cell1-vqq2m\" (UID: \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\") " pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.416434 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-ssh-key\") pod \"run-os-openstack-openstack-cell1-vqq2m\" (UID: \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\") " pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.416483 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-inventory\") pod \"run-os-openstack-openstack-cell1-vqq2m\" (UID: \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\") " pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.434146 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-ssh-key\") pod \"run-os-openstack-openstack-cell1-vqq2m\" (UID: \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\") " pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.434292 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-inventory\") pod \"run-os-openstack-openstack-cell1-vqq2m\" (UID: \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\") " pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.439989 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8wcn\" (UniqueName: \"kubernetes.io/projected/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-kube-api-access-x8wcn\") pod \"run-os-openstack-openstack-cell1-vqq2m\" (UID: \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\") " pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:58:59 crc kubenswrapper[4932]: I1125 10:58:59.606135 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:59:00 crc kubenswrapper[4932]: I1125 10:59:00.136361 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-vqq2m"] Nov 25 10:59:00 crc kubenswrapper[4932]: I1125 10:59:00.187726 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-vqq2m" event={"ID":"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af","Type":"ContainerStarted","Data":"6f8d201af5fb8a2e2afc85cadd5fee9446ce069361688e48371064a852dbe0ee"} Nov 25 10:59:01 crc kubenswrapper[4932]: I1125 10:59:01.198814 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-vqq2m" event={"ID":"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af","Type":"ContainerStarted","Data":"885f22e7b00287ef693938358bdfa28ecb78e96003d16eab4b02dff4bdf196af"} Nov 25 10:59:02 crc kubenswrapper[4932]: I1125 10:59:02.609303 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 10:59:02 crc kubenswrapper[4932]: E1125 10:59:02.609912 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:59:09 crc kubenswrapper[4932]: I1125 10:59:09.275132 4932 generic.go:334] "Generic (PLEG): container finished" podID="cbb26a71-0dd5-4d95-b4e7-d28d7654d0af" containerID="885f22e7b00287ef693938358bdfa28ecb78e96003d16eab4b02dff4bdf196af" exitCode=0 Nov 25 10:59:09 crc kubenswrapper[4932]: I1125 10:59:09.275235 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-vqq2m" event={"ID":"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af","Type":"ContainerDied","Data":"885f22e7b00287ef693938358bdfa28ecb78e96003d16eab4b02dff4bdf196af"} Nov 25 10:59:10 crc kubenswrapper[4932]: I1125 10:59:10.714139 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:59:10 crc kubenswrapper[4932]: I1125 10:59:10.776373 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-ssh-key\") pod \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\" (UID: \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\") " Nov 25 10:59:10 crc kubenswrapper[4932]: I1125 10:59:10.776435 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-inventory\") pod \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\" (UID: \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\") " Nov 25 10:59:10 crc kubenswrapper[4932]: I1125 10:59:10.776677 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8wcn\" (UniqueName: \"kubernetes.io/projected/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-kube-api-access-x8wcn\") pod \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\" (UID: \"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af\") " Nov 25 10:59:10 crc kubenswrapper[4932]: I1125 10:59:10.784575 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-kube-api-access-x8wcn" (OuterVolumeSpecName: "kube-api-access-x8wcn") pod "cbb26a71-0dd5-4d95-b4e7-d28d7654d0af" (UID: "cbb26a71-0dd5-4d95-b4e7-d28d7654d0af"). InnerVolumeSpecName "kube-api-access-x8wcn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:59:10 crc kubenswrapper[4932]: I1125 10:59:10.814762 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cbb26a71-0dd5-4d95-b4e7-d28d7654d0af" (UID: "cbb26a71-0dd5-4d95-b4e7-d28d7654d0af"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:59:10 crc kubenswrapper[4932]: I1125 10:59:10.820866 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-inventory" (OuterVolumeSpecName: "inventory") pod "cbb26a71-0dd5-4d95-b4e7-d28d7654d0af" (UID: "cbb26a71-0dd5-4d95-b4e7-d28d7654d0af"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:59:10 crc kubenswrapper[4932]: I1125 10:59:10.879577 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:59:10 crc kubenswrapper[4932]: I1125 10:59:10.879620 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:59:10 crc kubenswrapper[4932]: I1125 10:59:10.879639 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8wcn\" (UniqueName: \"kubernetes.io/projected/cbb26a71-0dd5-4d95-b4e7-d28d7654d0af-kube-api-access-x8wcn\") on node \"crc\" DevicePath \"\"" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.314230 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-vqq2m" event={"ID":"cbb26a71-0dd5-4d95-b4e7-d28d7654d0af","Type":"ContainerDied","Data":"6f8d201af5fb8a2e2afc85cadd5fee9446ce069361688e48371064a852dbe0ee"} Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.314604 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f8d201af5fb8a2e2afc85cadd5fee9446ce069361688e48371064a852dbe0ee" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.314274 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-vqq2m" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.369380 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-j896v"] Nov 25 10:59:11 crc kubenswrapper[4932]: E1125 10:59:11.370247 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbb26a71-0dd5-4d95-b4e7-d28d7654d0af" containerName="run-os-openstack-openstack-cell1" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.370270 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbb26a71-0dd5-4d95-b4e7-d28d7654d0af" containerName="run-os-openstack-openstack-cell1" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.370561 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbb26a71-0dd5-4d95-b4e7-d28d7654d0af" containerName="run-os-openstack-openstack-cell1" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.371744 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.373781 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.374546 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.374678 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.374826 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.380631 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-j896v"] Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.391329 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eaa0ba79-6483-49fb-a996-390914150be2-inventory\") pod \"reboot-os-openstack-openstack-cell1-j896v\" (UID: \"eaa0ba79-6483-49fb-a996-390914150be2\") " pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.391418 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sf7t9\" (UniqueName: \"kubernetes.io/projected/eaa0ba79-6483-49fb-a996-390914150be2-kube-api-access-sf7t9\") pod \"reboot-os-openstack-openstack-cell1-j896v\" (UID: \"eaa0ba79-6483-49fb-a996-390914150be2\") " pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.391635 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaa0ba79-6483-49fb-a996-390914150be2-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-j896v\" (UID: \"eaa0ba79-6483-49fb-a996-390914150be2\") " pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.494126 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eaa0ba79-6483-49fb-a996-390914150be2-inventory\") pod \"reboot-os-openstack-openstack-cell1-j896v\" (UID: \"eaa0ba79-6483-49fb-a996-390914150be2\") " pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.494219 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sf7t9\" (UniqueName: \"kubernetes.io/projected/eaa0ba79-6483-49fb-a996-390914150be2-kube-api-access-sf7t9\") pod \"reboot-os-openstack-openstack-cell1-j896v\" (UID: \"eaa0ba79-6483-49fb-a996-390914150be2\") " pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.494359 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaa0ba79-6483-49fb-a996-390914150be2-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-j896v\" (UID: \"eaa0ba79-6483-49fb-a996-390914150be2\") " pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.500742 4932 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eaa0ba79-6483-49fb-a996-390914150be2-inventory\") pod \"reboot-os-openstack-openstack-cell1-j896v\" (UID: \"eaa0ba79-6483-49fb-a996-390914150be2\") " pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.501909 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaa0ba79-6483-49fb-a996-390914150be2-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-j896v\" (UID: \"eaa0ba79-6483-49fb-a996-390914150be2\") " pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.517840 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sf7t9\" (UniqueName: \"kubernetes.io/projected/eaa0ba79-6483-49fb-a996-390914150be2-kube-api-access-sf7t9\") pod \"reboot-os-openstack-openstack-cell1-j896v\" (UID: \"eaa0ba79-6483-49fb-a996-390914150be2\") " pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:11 crc kubenswrapper[4932]: I1125 10:59:11.688508 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:12 crc kubenswrapper[4932]: I1125 10:59:12.242806 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-j896v"] Nov 25 10:59:12 crc kubenswrapper[4932]: I1125 10:59:12.322515 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-j896v" event={"ID":"eaa0ba79-6483-49fb-a996-390914150be2","Type":"ContainerStarted","Data":"24c5335bb55a0c2d72042e2cc25419fbaa2df656aac671b26e8cdfde8d8fc6e8"} Nov 25 10:59:13 crc kubenswrapper[4932]: I1125 10:59:13.342720 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-j896v" event={"ID":"eaa0ba79-6483-49fb-a996-390914150be2","Type":"ContainerStarted","Data":"4ce698a7efe4c841808ab1062bd01c97180afb60e1d8e2984b203b5aea4f1af9"} Nov 25 10:59:13 crc kubenswrapper[4932]: I1125 10:59:13.369365 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-openstack-openstack-cell1-j896v" podStartSLOduration=1.9708850999999998 podStartE2EDuration="2.369345861s" podCreationTimestamp="2025-11-25 10:59:11 +0000 UTC" firstStartedPulling="2025-11-25 10:59:12.272435664 +0000 UTC m=+7812.398465227" lastFinishedPulling="2025-11-25 10:59:12.670896425 +0000 UTC m=+7812.796925988" observedRunningTime="2025-11-25 10:59:13.359666694 +0000 UTC m=+7813.485696267" watchObservedRunningTime="2025-11-25 10:59:13.369345861 +0000 UTC m=+7813.495375424" Nov 25 10:59:16 crc kubenswrapper[4932]: I1125 10:59:16.606175 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 10:59:16 crc kubenswrapper[4932]: E1125 10:59:16.607124 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:59:28 crc kubenswrapper[4932]: I1125 10:59:28.484861 4932 
generic.go:334] "Generic (PLEG): container finished" podID="eaa0ba79-6483-49fb-a996-390914150be2" containerID="4ce698a7efe4c841808ab1062bd01c97180afb60e1d8e2984b203b5aea4f1af9" exitCode=0 Nov 25 10:59:28 crc kubenswrapper[4932]: I1125 10:59:28.484966 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-j896v" event={"ID":"eaa0ba79-6483-49fb-a996-390914150be2","Type":"ContainerDied","Data":"4ce698a7efe4c841808ab1062bd01c97180afb60e1d8e2984b203b5aea4f1af9"} Nov 25 10:59:28 crc kubenswrapper[4932]: I1125 10:59:28.606922 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 10:59:28 crc kubenswrapper[4932]: E1125 10:59:28.607286 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:59:29 crc kubenswrapper[4932]: I1125 10:59:29.972588 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.090422 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaa0ba79-6483-49fb-a996-390914150be2-ssh-key\") pod \"eaa0ba79-6483-49fb-a996-390914150be2\" (UID: \"eaa0ba79-6483-49fb-a996-390914150be2\") " Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.090839 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eaa0ba79-6483-49fb-a996-390914150be2-inventory\") pod \"eaa0ba79-6483-49fb-a996-390914150be2\" (UID: \"eaa0ba79-6483-49fb-a996-390914150be2\") " Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.090938 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sf7t9\" (UniqueName: \"kubernetes.io/projected/eaa0ba79-6483-49fb-a996-390914150be2-kube-api-access-sf7t9\") pod \"eaa0ba79-6483-49fb-a996-390914150be2\" (UID: \"eaa0ba79-6483-49fb-a996-390914150be2\") " Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.096643 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eaa0ba79-6483-49fb-a996-390914150be2-kube-api-access-sf7t9" (OuterVolumeSpecName: "kube-api-access-sf7t9") pod "eaa0ba79-6483-49fb-a996-390914150be2" (UID: "eaa0ba79-6483-49fb-a996-390914150be2"). InnerVolumeSpecName "kube-api-access-sf7t9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.126931 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eaa0ba79-6483-49fb-a996-390914150be2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "eaa0ba79-6483-49fb-a996-390914150be2" (UID: "eaa0ba79-6483-49fb-a996-390914150be2"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.131019 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eaa0ba79-6483-49fb-a996-390914150be2-inventory" (OuterVolumeSpecName: "inventory") pod "eaa0ba79-6483-49fb-a996-390914150be2" (UID: "eaa0ba79-6483-49fb-a996-390914150be2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.195821 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eaa0ba79-6483-49fb-a996-390914150be2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.196363 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eaa0ba79-6483-49fb-a996-390914150be2-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.196381 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sf7t9\" (UniqueName: \"kubernetes.io/projected/eaa0ba79-6483-49fb-a996-390914150be2-kube-api-access-sf7t9\") on node \"crc\" DevicePath \"\"" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.505536 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-j896v" event={"ID":"eaa0ba79-6483-49fb-a996-390914150be2","Type":"ContainerDied","Data":"24c5335bb55a0c2d72042e2cc25419fbaa2df656aac671b26e8cdfde8d8fc6e8"} Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.505625 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24c5335bb55a0c2d72042e2cc25419fbaa2df656aac671b26e8cdfde8d8fc6e8" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.505655 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-j896v" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.596095 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-wf5lq"] Nov 25 10:59:30 crc kubenswrapper[4932]: E1125 10:59:30.596973 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eaa0ba79-6483-49fb-a996-390914150be2" containerName="reboot-os-openstack-openstack-cell1" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.597064 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="eaa0ba79-6483-49fb-a996-390914150be2" containerName="reboot-os-openstack-openstack-cell1" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.597407 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="eaa0ba79-6483-49fb-a996-390914150be2" containerName="reboot-os-openstack-openstack-cell1" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.598756 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.602767 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.602986 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.603118 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-telemetry-default-certs-0" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.603310 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-ovn-default-certs-0" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.605746 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.605923 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-neutron-metadata-default-certs-0" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.606071 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-libvirt-default-certs-0" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.606254 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.631150 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-wf5lq"] Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.711028 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-inventory\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.711140 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-libvirt-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.711260 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-telemetry-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.711603 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-ssh-key\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 
25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.711713 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.711746 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.711764 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lhsv\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-kube-api-access-9lhsv\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.711873 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.711906 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-ovn-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.712029 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.712080 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-neutron-metadata-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.712114 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.712291 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.712383 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.712445 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.814807 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-ssh-key\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.814886 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.814909 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.814927 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lhsv\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-kube-api-access-9lhsv\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.814967 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.814989 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-ovn-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.815030 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.815057 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-neutron-metadata-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.815081 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.815104 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.815128 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.815149 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 
10:59:30.815179 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-inventory\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.815229 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-libvirt-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.815256 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-telemetry-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.819853 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.820456 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.820506 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.820471 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.820644 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-telemetry-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " 
pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.820757 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-ovn-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.821364 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.821507 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-neutron-metadata-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.822002 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-inventory\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.822074 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.822113 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-libvirt-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.822650 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-ssh-key\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.823120 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " 
pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.824270 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.834604 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lhsv\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-kube-api-access-9lhsv\") pod \"install-certs-openstack-openstack-cell1-wf5lq\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:30 crc kubenswrapper[4932]: I1125 10:59:30.917603 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 10:59:31 crc kubenswrapper[4932]: I1125 10:59:31.451915 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-wf5lq"] Nov 25 10:59:31 crc kubenswrapper[4932]: I1125 10:59:31.518904 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" event={"ID":"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621","Type":"ContainerStarted","Data":"1aacd221ca0b43e0c858fc4943fb452f0d1333b9e70b5904f58a416087fb4d6b"} Nov 25 10:59:32 crc kubenswrapper[4932]: I1125 10:59:32.529247 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" event={"ID":"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621","Type":"ContainerStarted","Data":"04d696d333eed18a48517bd3bee40db9a048640ff0cf2f7a3d622c0fc524e5bc"} Nov 25 10:59:32 crc kubenswrapper[4932]: I1125 10:59:32.555332 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" podStartSLOduration=2.137723884 podStartE2EDuration="2.555297883s" podCreationTimestamp="2025-11-25 10:59:30 +0000 UTC" firstStartedPulling="2025-11-25 10:59:31.4529653 +0000 UTC m=+7831.578994863" lastFinishedPulling="2025-11-25 10:59:31.870539299 +0000 UTC m=+7831.996568862" observedRunningTime="2025-11-25 10:59:32.550769473 +0000 UTC m=+7832.676799046" watchObservedRunningTime="2025-11-25 10:59:32.555297883 +0000 UTC m=+7832.681327486" Nov 25 10:59:43 crc kubenswrapper[4932]: I1125 10:59:43.606065 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 10:59:43 crc kubenswrapper[4932]: E1125 10:59:43.606975 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 10:59:57 crc kubenswrapper[4932]: I1125 10:59:57.606707 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 10:59:57 crc kubenswrapper[4932]: E1125 10:59:57.607598 4932 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.149503 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94"] Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.152918 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.155820 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.156847 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.163838 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94"] Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.192609 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/967c62e1-09fd-47c2-9706-9414114f5ae1-secret-volume\") pod \"collect-profiles-29401140-rtd94\" (UID: \"967c62e1-09fd-47c2-9706-9414114f5ae1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.192727 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26bzp\" (UniqueName: \"kubernetes.io/projected/967c62e1-09fd-47c2-9706-9414114f5ae1-kube-api-access-26bzp\") pod \"collect-profiles-29401140-rtd94\" (UID: \"967c62e1-09fd-47c2-9706-9414114f5ae1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.192794 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/967c62e1-09fd-47c2-9706-9414114f5ae1-config-volume\") pod \"collect-profiles-29401140-rtd94\" (UID: \"967c62e1-09fd-47c2-9706-9414114f5ae1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.293768 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/967c62e1-09fd-47c2-9706-9414114f5ae1-secret-volume\") pod \"collect-profiles-29401140-rtd94\" (UID: \"967c62e1-09fd-47c2-9706-9414114f5ae1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.293835 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26bzp\" (UniqueName: \"kubernetes.io/projected/967c62e1-09fd-47c2-9706-9414114f5ae1-kube-api-access-26bzp\") pod \"collect-profiles-29401140-rtd94\" (UID: \"967c62e1-09fd-47c2-9706-9414114f5ae1\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.293908 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/967c62e1-09fd-47c2-9706-9414114f5ae1-config-volume\") pod \"collect-profiles-29401140-rtd94\" (UID: \"967c62e1-09fd-47c2-9706-9414114f5ae1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.294975 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/967c62e1-09fd-47c2-9706-9414114f5ae1-config-volume\") pod \"collect-profiles-29401140-rtd94\" (UID: \"967c62e1-09fd-47c2-9706-9414114f5ae1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.302550 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/967c62e1-09fd-47c2-9706-9414114f5ae1-secret-volume\") pod \"collect-profiles-29401140-rtd94\" (UID: \"967c62e1-09fd-47c2-9706-9414114f5ae1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.311875 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26bzp\" (UniqueName: \"kubernetes.io/projected/967c62e1-09fd-47c2-9706-9414114f5ae1-kube-api-access-26bzp\") pod \"collect-profiles-29401140-rtd94\" (UID: \"967c62e1-09fd-47c2-9706-9414114f5ae1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.481235 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:00 crc kubenswrapper[4932]: I1125 11:00:00.952768 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94"] Nov 25 11:00:01 crc kubenswrapper[4932]: I1125 11:00:01.824439 4932 generic.go:334] "Generic (PLEG): container finished" podID="967c62e1-09fd-47c2-9706-9414114f5ae1" containerID="858c0a06b9f0f183d2b15e087284621eff46a45be09ea2f3fff10423ade08c11" exitCode=0 Nov 25 11:00:01 crc kubenswrapper[4932]: I1125 11:00:01.824552 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" event={"ID":"967c62e1-09fd-47c2-9706-9414114f5ae1","Type":"ContainerDied","Data":"858c0a06b9f0f183d2b15e087284621eff46a45be09ea2f3fff10423ade08c11"} Nov 25 11:00:01 crc kubenswrapper[4932]: I1125 11:00:01.824769 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" event={"ID":"967c62e1-09fd-47c2-9706-9414114f5ae1","Type":"ContainerStarted","Data":"89def2be7af1f23d6852abf93bddac3c1e88d7cb8cc93fe6395a7a293d1c4bae"} Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.206037 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.261918 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26bzp\" (UniqueName: \"kubernetes.io/projected/967c62e1-09fd-47c2-9706-9414114f5ae1-kube-api-access-26bzp\") pod \"967c62e1-09fd-47c2-9706-9414114f5ae1\" (UID: \"967c62e1-09fd-47c2-9706-9414114f5ae1\") " Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.262037 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/967c62e1-09fd-47c2-9706-9414114f5ae1-config-volume\") pod \"967c62e1-09fd-47c2-9706-9414114f5ae1\" (UID: \"967c62e1-09fd-47c2-9706-9414114f5ae1\") " Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.262072 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/967c62e1-09fd-47c2-9706-9414114f5ae1-secret-volume\") pod \"967c62e1-09fd-47c2-9706-9414114f5ae1\" (UID: \"967c62e1-09fd-47c2-9706-9414114f5ae1\") " Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.262802 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/967c62e1-09fd-47c2-9706-9414114f5ae1-config-volume" (OuterVolumeSpecName: "config-volume") pod "967c62e1-09fd-47c2-9706-9414114f5ae1" (UID: "967c62e1-09fd-47c2-9706-9414114f5ae1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.270403 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/967c62e1-09fd-47c2-9706-9414114f5ae1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "967c62e1-09fd-47c2-9706-9414114f5ae1" (UID: "967c62e1-09fd-47c2-9706-9414114f5ae1"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.270991 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/967c62e1-09fd-47c2-9706-9414114f5ae1-kube-api-access-26bzp" (OuterVolumeSpecName: "kube-api-access-26bzp") pod "967c62e1-09fd-47c2-9706-9414114f5ae1" (UID: "967c62e1-09fd-47c2-9706-9414114f5ae1"). InnerVolumeSpecName "kube-api-access-26bzp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.364032 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26bzp\" (UniqueName: \"kubernetes.io/projected/967c62e1-09fd-47c2-9706-9414114f5ae1-kube-api-access-26bzp\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.364101 4932 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/967c62e1-09fd-47c2-9706-9414114f5ae1-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.364112 4932 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/967c62e1-09fd-47c2-9706-9414114f5ae1-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.845334 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" event={"ID":"967c62e1-09fd-47c2-9706-9414114f5ae1","Type":"ContainerDied","Data":"89def2be7af1f23d6852abf93bddac3c1e88d7cb8cc93fe6395a7a293d1c4bae"} Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.845600 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="89def2be7af1f23d6852abf93bddac3c1e88d7cb8cc93fe6395a7a293d1c4bae" Nov 25 11:00:03 crc kubenswrapper[4932]: I1125 11:00:03.845376 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401140-rtd94" Nov 25 11:00:04 crc kubenswrapper[4932]: I1125 11:00:04.284333 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb"] Nov 25 11:00:04 crc kubenswrapper[4932]: I1125 11:00:04.293368 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401095-5bmhb"] Nov 25 11:00:04 crc kubenswrapper[4932]: I1125 11:00:04.620910 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39a689b7-0688-4234-9dd8-4482c9ef03f7" path="/var/lib/kubelet/pods/39a689b7-0688-4234-9dd8-4482c9ef03f7/volumes" Nov 25 11:00:07 crc kubenswrapper[4932]: I1125 11:00:07.882910 4932 generic.go:334] "Generic (PLEG): container finished" podID="cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" containerID="04d696d333eed18a48517bd3bee40db9a048640ff0cf2f7a3d622c0fc524e5bc" exitCode=0 Nov 25 11:00:07 crc kubenswrapper[4932]: I1125 11:00:07.883008 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" event={"ID":"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621","Type":"ContainerDied","Data":"04d696d333eed18a48517bd3bee40db9a048640ff0cf2f7a3d622c0fc524e5bc"} Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.322921 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.491028 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-nova-combined-ca-bundle\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.491098 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-libvirt-combined-ca-bundle\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.491170 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-ovn-default-certs-0\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.491214 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-libvirt-default-certs-0\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.491263 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9lhsv\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-kube-api-access-9lhsv\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.491314 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-ssh-key\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.491380 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-bootstrap-combined-ca-bundle\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.491420 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-inventory\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.491471 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-neutron-metadata-default-certs-0\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.491509 4932 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-dhcp-combined-ca-bundle\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.491535 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-metadata-combined-ca-bundle\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.491555 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-ovn-combined-ca-bundle\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.492243 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-sriov-combined-ca-bundle\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.492300 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-telemetry-default-certs-0\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.492368 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-telemetry-combined-ca-bundle\") pod \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\" (UID: \"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621\") " Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.498635 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-kube-api-access-9lhsv" (OuterVolumeSpecName: "kube-api-access-9lhsv") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "kube-api-access-9lhsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.498805 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "nova-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.499476 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-cell1-ovn-default-certs-0") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "openstack-cell1-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.500021 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.500144 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-cell1-libvirt-default-certs-0") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "openstack-cell1-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.501328 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-cell1-neutron-metadata-default-certs-0") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "openstack-cell1-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.501979 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.502539 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.503877 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-cell1-telemetry-default-certs-0") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "openstack-cell1-telemetry-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.503963 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.504086 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.504519 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.505500 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.532608 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-inventory" (OuterVolumeSpecName: "inventory") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.536774 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" (UID: "cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595131 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595203 4932 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595217 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595227 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595239 4932 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595254 4932 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595265 4932 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595274 4932 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595284 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595294 4932 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595303 4932 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595312 4932 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-libvirt-combined-ca-bundle\") on node \"crc\" 
DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595322 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595330 4932 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-openstack-cell1-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.595339 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9lhsv\" (UniqueName: \"kubernetes.io/projected/cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621-kube-api-access-9lhsv\") on node \"crc\" DevicePath \"\"" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.900593 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" event={"ID":"cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621","Type":"ContainerDied","Data":"1aacd221ca0b43e0c858fc4943fb452f0d1333b9e70b5904f58a416087fb4d6b"} Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.900920 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1aacd221ca0b43e0c858fc4943fb452f0d1333b9e70b5904f58a416087fb4d6b" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.900678 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-wf5lq" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.992590 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-openstack-openstack-cell1-rtf2t"] Nov 25 11:00:09 crc kubenswrapper[4932]: E1125 11:00:09.993029 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="967c62e1-09fd-47c2-9706-9414114f5ae1" containerName="collect-profiles" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.993045 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="967c62e1-09fd-47c2-9706-9414114f5ae1" containerName="collect-profiles" Nov 25 11:00:09 crc kubenswrapper[4932]: E1125 11:00:09.993073 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" containerName="install-certs-openstack-openstack-cell1" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.993080 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" containerName="install-certs-openstack-openstack-cell1" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.993273 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="967c62e1-09fd-47c2-9706-9414114f5ae1" containerName="collect-profiles" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.993291 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf2a7b04-d1e9-4ff7-86e9-2d9c153dd621" containerName="install-certs-openstack-openstack-cell1" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.994116 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.997123 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.997299 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.997339 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 11:00:09 crc kubenswrapper[4932]: I1125 11:00:09.997783 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.003684 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.007711 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-rtf2t"] Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.019544 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.019620 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ssh-key\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.019646 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-htgmh\" (UniqueName: \"kubernetes.io/projected/e9e3f368-256c-4720-8d71-b0fb2a773e9c-kube-api-access-htgmh\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.019815 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-inventory\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.019842 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.121811 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-inventory\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: 
\"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.122083 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.122222 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.122332 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ssh-key\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.122396 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-htgmh\" (UniqueName: \"kubernetes.io/projected/e9e3f368-256c-4720-8d71-b0fb2a773e9c-kube-api-access-htgmh\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.123105 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.127811 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-inventory\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.128161 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.128663 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ssh-key\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.137824 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-htgmh\" (UniqueName: 
\"kubernetes.io/projected/e9e3f368-256c-4720-8d71-b0fb2a773e9c-kube-api-access-htgmh\") pod \"ovn-openstack-openstack-cell1-rtf2t\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.317774 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.857285 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-rtf2t"] Nov 25 11:00:10 crc kubenswrapper[4932]: I1125 11:00:10.913214 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-rtf2t" event={"ID":"e9e3f368-256c-4720-8d71-b0fb2a773e9c","Type":"ContainerStarted","Data":"67b5f525da29424180658f939236554948826f52783398325a6af64177ce6376"} Nov 25 11:00:11 crc kubenswrapper[4932]: I1125 11:00:11.606159 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 11:00:11 crc kubenswrapper[4932]: E1125 11:00:11.606756 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:00:11 crc kubenswrapper[4932]: I1125 11:00:11.925371 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-rtf2t" event={"ID":"e9e3f368-256c-4720-8d71-b0fb2a773e9c","Type":"ContainerStarted","Data":"de1059514acd8c3b2c33a921dce98438204c664ff3f43bd43ba240cf075e8bbd"} Nov 25 11:00:19 crc kubenswrapper[4932]: I1125 11:00:19.032812 4932 scope.go:117] "RemoveContainer" containerID="89e31562136feb2c0d42ae343294db22e2a1a8aaa064a32443787b36e1c8516b" Nov 25 11:00:22 crc kubenswrapper[4932]: I1125 11:00:22.607347 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 11:00:22 crc kubenswrapper[4932]: E1125 11:00:22.609335 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:00:33 crc kubenswrapper[4932]: I1125 11:00:33.605996 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 11:00:33 crc kubenswrapper[4932]: E1125 11:00:33.606749 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:00:45 crc kubenswrapper[4932]: I1125 11:00:45.986404 4932 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/ovn-openstack-openstack-cell1-rtf2t" podStartSLOduration=36.529464694 podStartE2EDuration="36.986387322s" podCreationTimestamp="2025-11-25 11:00:09 +0000 UTC" firstStartedPulling="2025-11-25 11:00:10.862665491 +0000 UTC m=+7870.988695074" lastFinishedPulling="2025-11-25 11:00:11.319588139 +0000 UTC m=+7871.445617702" observedRunningTime="2025-11-25 11:00:11.951737164 +0000 UTC m=+7872.077766737" watchObservedRunningTime="2025-11-25 11:00:45.986387322 +0000 UTC m=+7906.112416885" Nov 25 11:00:45 crc kubenswrapper[4932]: I1125 11:00:45.992652 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mjrb7"] Nov 25 11:00:45 crc kubenswrapper[4932]: I1125 11:00:45.995728 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:00:46 crc kubenswrapper[4932]: I1125 11:00:46.006710 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mjrb7"] Nov 25 11:00:46 crc kubenswrapper[4932]: I1125 11:00:46.109508 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d6498a0-2d33-4224-911d-999c4bf17ada-utilities\") pod \"redhat-operators-mjrb7\" (UID: \"3d6498a0-2d33-4224-911d-999c4bf17ada\") " pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:00:46 crc kubenswrapper[4932]: I1125 11:00:46.109794 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cqbc\" (UniqueName: \"kubernetes.io/projected/3d6498a0-2d33-4224-911d-999c4bf17ada-kube-api-access-4cqbc\") pod \"redhat-operators-mjrb7\" (UID: \"3d6498a0-2d33-4224-911d-999c4bf17ada\") " pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:00:46 crc kubenswrapper[4932]: I1125 11:00:46.109919 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d6498a0-2d33-4224-911d-999c4bf17ada-catalog-content\") pod \"redhat-operators-mjrb7\" (UID: \"3d6498a0-2d33-4224-911d-999c4bf17ada\") " pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:00:46 crc kubenswrapper[4932]: I1125 11:00:46.212751 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d6498a0-2d33-4224-911d-999c4bf17ada-utilities\") pod \"redhat-operators-mjrb7\" (UID: \"3d6498a0-2d33-4224-911d-999c4bf17ada\") " pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:00:46 crc kubenswrapper[4932]: I1125 11:00:46.212827 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cqbc\" (UniqueName: \"kubernetes.io/projected/3d6498a0-2d33-4224-911d-999c4bf17ada-kube-api-access-4cqbc\") pod \"redhat-operators-mjrb7\" (UID: \"3d6498a0-2d33-4224-911d-999c4bf17ada\") " pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:00:46 crc kubenswrapper[4932]: I1125 11:00:46.212883 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d6498a0-2d33-4224-911d-999c4bf17ada-catalog-content\") pod \"redhat-operators-mjrb7\" (UID: \"3d6498a0-2d33-4224-911d-999c4bf17ada\") " pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:00:46 crc kubenswrapper[4932]: I1125 11:00:46.213397 4932 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d6498a0-2d33-4224-911d-999c4bf17ada-utilities\") pod \"redhat-operators-mjrb7\" (UID: \"3d6498a0-2d33-4224-911d-999c4bf17ada\") " pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:00:46 crc kubenswrapper[4932]: I1125 11:00:46.213478 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d6498a0-2d33-4224-911d-999c4bf17ada-catalog-content\") pod \"redhat-operators-mjrb7\" (UID: \"3d6498a0-2d33-4224-911d-999c4bf17ada\") " pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:00:46 crc kubenswrapper[4932]: I1125 11:00:46.233292 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cqbc\" (UniqueName: \"kubernetes.io/projected/3d6498a0-2d33-4224-911d-999c4bf17ada-kube-api-access-4cqbc\") pod \"redhat-operators-mjrb7\" (UID: \"3d6498a0-2d33-4224-911d-999c4bf17ada\") " pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:00:46 crc kubenswrapper[4932]: I1125 11:00:46.327154 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:00:46 crc kubenswrapper[4932]: I1125 11:00:46.840553 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mjrb7"] Nov 25 11:00:47 crc kubenswrapper[4932]: I1125 11:00:47.310550 4932 generic.go:334] "Generic (PLEG): container finished" podID="3d6498a0-2d33-4224-911d-999c4bf17ada" containerID="353bad96f0e42457b6ec716bc61e7e77a8bd06499448c53cea531f90d320b107" exitCode=0 Nov 25 11:00:47 crc kubenswrapper[4932]: I1125 11:00:47.310627 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjrb7" event={"ID":"3d6498a0-2d33-4224-911d-999c4bf17ada","Type":"ContainerDied","Data":"353bad96f0e42457b6ec716bc61e7e77a8bd06499448c53cea531f90d320b107"} Nov 25 11:00:47 crc kubenswrapper[4932]: I1125 11:00:47.310877 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjrb7" event={"ID":"3d6498a0-2d33-4224-911d-999c4bf17ada","Type":"ContainerStarted","Data":"96d3569b881fc93f9b8a97f1afb43af5978fa9123f254488043ec5b91346bc98"} Nov 25 11:00:48 crc kubenswrapper[4932]: I1125 11:00:48.322545 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjrb7" event={"ID":"3d6498a0-2d33-4224-911d-999c4bf17ada","Type":"ContainerStarted","Data":"db0ea50c8427ab2e8b2a2c084da148d95af6ef9695f74ad8de595a1994435880"} Nov 25 11:00:48 crc kubenswrapper[4932]: I1125 11:00:48.606833 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 11:00:48 crc kubenswrapper[4932]: E1125 11:00:48.607517 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:00:52 crc kubenswrapper[4932]: I1125 11:00:52.365082 4932 generic.go:334] "Generic (PLEG): container finished" podID="3d6498a0-2d33-4224-911d-999c4bf17ada" containerID="db0ea50c8427ab2e8b2a2c084da148d95af6ef9695f74ad8de595a1994435880" 
Nov 25 11:00:52 crc kubenswrapper[4932]: I1125 11:00:52.365266 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjrb7" event={"ID":"3d6498a0-2d33-4224-911d-999c4bf17ada","Type":"ContainerDied","Data":"db0ea50c8427ab2e8b2a2c084da148d95af6ef9695f74ad8de595a1994435880"}
Nov 25 11:00:53 crc kubenswrapper[4932]: I1125 11:00:53.377089 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjrb7" event={"ID":"3d6498a0-2d33-4224-911d-999c4bf17ada","Type":"ContainerStarted","Data":"108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803"}
Nov 25 11:00:53 crc kubenswrapper[4932]: I1125 11:00:53.394488 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mjrb7" podStartSLOduration=2.885915574 podStartE2EDuration="8.394468938s" podCreationTimestamp="2025-11-25 11:00:45 +0000 UTC" firstStartedPulling="2025-11-25 11:00:47.313573345 +0000 UTC m=+7907.439602908" lastFinishedPulling="2025-11-25 11:00:52.822126709 +0000 UTC m=+7912.948156272" observedRunningTime="2025-11-25 11:00:53.392835782 +0000 UTC m=+7913.518865335" watchObservedRunningTime="2025-11-25 11:00:53.394468938 +0000 UTC m=+7913.520498501"
Nov 25 11:00:56 crc kubenswrapper[4932]: I1125 11:00:56.328129 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mjrb7"
Nov 25 11:00:56 crc kubenswrapper[4932]: I1125 11:00:56.328733 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mjrb7"
Nov 25 11:00:57 crc kubenswrapper[4932]: I1125 11:00:57.381871 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mjrb7" podUID="3d6498a0-2d33-4224-911d-999c4bf17ada" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:00:57 crc kubenswrapper[4932]: 	timeout: failed to connect service ":50051" within 1s
Nov 25 11:00:57 crc kubenswrapper[4932]: 	>
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.174937 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401141-rfcrw"]
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.178404 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.204298 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401141-rfcrw"]
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.325697 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-combined-ca-bundle\") pod \"keystone-cron-29401141-rfcrw\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") " pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.325870 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-config-data\") pod \"keystone-cron-29401141-rfcrw\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") " pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.326224 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2fwr8\" (UniqueName: \"kubernetes.io/projected/5750b87e-5526-43dc-9adb-c625b223a356-kube-api-access-2fwr8\") pod \"keystone-cron-29401141-rfcrw\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") " pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.326394 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-fernet-keys\") pod \"keystone-cron-29401141-rfcrw\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") " pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.428608 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2fwr8\" (UniqueName: \"kubernetes.io/projected/5750b87e-5526-43dc-9adb-c625b223a356-kube-api-access-2fwr8\") pod \"keystone-cron-29401141-rfcrw\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") " pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.428705 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-fernet-keys\") pod \"keystone-cron-29401141-rfcrw\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") " pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.428765 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-combined-ca-bundle\") pod \"keystone-cron-29401141-rfcrw\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") " pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.428842 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-config-data\") pod \"keystone-cron-29401141-rfcrw\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") " pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.435576 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-config-data\") pod \"keystone-cron-29401141-rfcrw\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") " pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.437319 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-fernet-keys\") pod \"keystone-cron-29401141-rfcrw\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") " pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.438262 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-combined-ca-bundle\") pod \"keystone-cron-29401141-rfcrw\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") " pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.448804 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2fwr8\" (UniqueName: \"kubernetes.io/projected/5750b87e-5526-43dc-9adb-c625b223a356-kube-api-access-2fwr8\") pod \"keystone-cron-29401141-rfcrw\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") " pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:00 crc kubenswrapper[4932]: I1125 11:01:00.514034 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:01 crc kubenswrapper[4932]: I1125 11:01:01.012041 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401141-rfcrw"]
Nov 25 11:01:01 crc kubenswrapper[4932]: I1125 11:01:01.455556 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401141-rfcrw" event={"ID":"5750b87e-5526-43dc-9adb-c625b223a356","Type":"ContainerStarted","Data":"9660aeb89aeb1845eedc3b7e0b1e667f19aa70a558e8004193b5a63a85fed771"}
Nov 25 11:01:01 crc kubenswrapper[4932]: I1125 11:01:01.455915 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401141-rfcrw" event={"ID":"5750b87e-5526-43dc-9adb-c625b223a356","Type":"ContainerStarted","Data":"743755b8100903a6171de3f264e2808f53102df74ac76687cf14ef4ff6efd2fd"}
Nov 25 11:01:01 crc kubenswrapper[4932]: I1125 11:01:01.606108 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b"
Nov 25 11:01:01 crc kubenswrapper[4932]: E1125 11:01:01.606447 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:01:05 crc kubenswrapper[4932]: I1125 11:01:05.494366 4932 generic.go:334] "Generic (PLEG): container finished" podID="5750b87e-5526-43dc-9adb-c625b223a356" containerID="9660aeb89aeb1845eedc3b7e0b1e667f19aa70a558e8004193b5a63a85fed771" exitCode=0
Nov 25 11:01:05 crc kubenswrapper[4932]: I1125 11:01:05.494949 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401141-rfcrw" event={"ID":"5750b87e-5526-43dc-9adb-c625b223a356","Type":"ContainerDied","Data":"9660aeb89aeb1845eedc3b7e0b1e667f19aa70a558e8004193b5a63a85fed771"}
Nov 25 11:01:06 crc kubenswrapper[4932]: I1125 11:01:06.387066 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mjrb7"
Nov 25 11:01:06 crc kubenswrapper[4932]: I1125 11:01:06.446371 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mjrb7"
Nov 25 11:01:06 crc kubenswrapper[4932]: I1125 11:01:06.641134 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mjrb7"]
Nov 25 11:01:06 crc kubenswrapper[4932]: I1125 11:01:06.917739 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.086948 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2fwr8\" (UniqueName: \"kubernetes.io/projected/5750b87e-5526-43dc-9adb-c625b223a356-kube-api-access-2fwr8\") pod \"5750b87e-5526-43dc-9adb-c625b223a356\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") "
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.087060 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-config-data\") pod \"5750b87e-5526-43dc-9adb-c625b223a356\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") "
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.087309 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-combined-ca-bundle\") pod \"5750b87e-5526-43dc-9adb-c625b223a356\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") "
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.087341 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-fernet-keys\") pod \"5750b87e-5526-43dc-9adb-c625b223a356\" (UID: \"5750b87e-5526-43dc-9adb-c625b223a356\") "
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.095137 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5750b87e-5526-43dc-9adb-c625b223a356-kube-api-access-2fwr8" (OuterVolumeSpecName: "kube-api-access-2fwr8") pod "5750b87e-5526-43dc-9adb-c625b223a356" (UID: "5750b87e-5526-43dc-9adb-c625b223a356"). InnerVolumeSpecName "kube-api-access-2fwr8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.102676 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5750b87e-5526-43dc-9adb-c625b223a356" (UID: "5750b87e-5526-43dc-9adb-c625b223a356"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.132344 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5750b87e-5526-43dc-9adb-c625b223a356" (UID: "5750b87e-5526-43dc-9adb-c625b223a356"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.171205 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-config-data" (OuterVolumeSpecName: "config-data") pod "5750b87e-5526-43dc-9adb-c625b223a356" (UID: "5750b87e-5526-43dc-9adb-c625b223a356"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.190079 4932 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.190123 4932 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.190139 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2fwr8\" (UniqueName: \"kubernetes.io/projected/5750b87e-5526-43dc-9adb-c625b223a356-kube-api-access-2fwr8\") on node \"crc\" DevicePath \"\""
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.190150 4932 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5750b87e-5526-43dc-9adb-c625b223a356-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.520148 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401141-rfcrw" event={"ID":"5750b87e-5526-43dc-9adb-c625b223a356","Type":"ContainerDied","Data":"743755b8100903a6171de3f264e2808f53102df74ac76687cf14ef4ff6efd2fd"}
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.520222 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="743755b8100903a6171de3f264e2808f53102df74ac76687cf14ef4ff6efd2fd"
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.520168 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401141-rfcrw"
Nov 25 11:01:07 crc kubenswrapper[4932]: I1125 11:01:07.520365 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mjrb7" podUID="3d6498a0-2d33-4224-911d-999c4bf17ada" containerName="registry-server" containerID="cri-o://108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803" gracePeriod=2
Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.044269 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.213893 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cqbc\" (UniqueName: \"kubernetes.io/projected/3d6498a0-2d33-4224-911d-999c4bf17ada-kube-api-access-4cqbc\") pod \"3d6498a0-2d33-4224-911d-999c4bf17ada\" (UID: \"3d6498a0-2d33-4224-911d-999c4bf17ada\") " Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.214095 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d6498a0-2d33-4224-911d-999c4bf17ada-utilities\") pod \"3d6498a0-2d33-4224-911d-999c4bf17ada\" (UID: \"3d6498a0-2d33-4224-911d-999c4bf17ada\") " Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.214163 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d6498a0-2d33-4224-911d-999c4bf17ada-catalog-content\") pod \"3d6498a0-2d33-4224-911d-999c4bf17ada\" (UID: \"3d6498a0-2d33-4224-911d-999c4bf17ada\") " Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.215447 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d6498a0-2d33-4224-911d-999c4bf17ada-utilities" (OuterVolumeSpecName: "utilities") pod "3d6498a0-2d33-4224-911d-999c4bf17ada" (UID: "3d6498a0-2d33-4224-911d-999c4bf17ada"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.229777 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d6498a0-2d33-4224-911d-999c4bf17ada-kube-api-access-4cqbc" (OuterVolumeSpecName: "kube-api-access-4cqbc") pod "3d6498a0-2d33-4224-911d-999c4bf17ada" (UID: "3d6498a0-2d33-4224-911d-999c4bf17ada"). InnerVolumeSpecName "kube-api-access-4cqbc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.308991 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d6498a0-2d33-4224-911d-999c4bf17ada-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3d6498a0-2d33-4224-911d-999c4bf17ada" (UID: "3d6498a0-2d33-4224-911d-999c4bf17ada"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.316843 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d6498a0-2d33-4224-911d-999c4bf17ada-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.316880 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cqbc\" (UniqueName: \"kubernetes.io/projected/3d6498a0-2d33-4224-911d-999c4bf17ada-kube-api-access-4cqbc\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.316892 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d6498a0-2d33-4224-911d-999c4bf17ada-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.533627 4932 generic.go:334] "Generic (PLEG): container finished" podID="3d6498a0-2d33-4224-911d-999c4bf17ada" containerID="108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803" exitCode=0 Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.533679 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjrb7" event={"ID":"3d6498a0-2d33-4224-911d-999c4bf17ada","Type":"ContainerDied","Data":"108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803"} Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.533708 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mjrb7" event={"ID":"3d6498a0-2d33-4224-911d-999c4bf17ada","Type":"ContainerDied","Data":"96d3569b881fc93f9b8a97f1afb43af5978fa9123f254488043ec5b91346bc98"} Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.533729 4932 scope.go:117] "RemoveContainer" containerID="108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.533791 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mjrb7" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.573510 4932 scope.go:117] "RemoveContainer" containerID="db0ea50c8427ab2e8b2a2c084da148d95af6ef9695f74ad8de595a1994435880" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.581497 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mjrb7"] Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.592549 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mjrb7"] Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.618152 4932 scope.go:117] "RemoveContainer" containerID="353bad96f0e42457b6ec716bc61e7e77a8bd06499448c53cea531f90d320b107" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.625979 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d6498a0-2d33-4224-911d-999c4bf17ada" path="/var/lib/kubelet/pods/3d6498a0-2d33-4224-911d-999c4bf17ada/volumes" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.672897 4932 scope.go:117] "RemoveContainer" containerID="108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803" Nov 25 11:01:08 crc kubenswrapper[4932]: E1125 11:01:08.673426 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803\": container with ID starting with 108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803 not found: ID does not exist" containerID="108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.673498 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803"} err="failed to get container status \"108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803\": rpc error: code = NotFound desc = could not find container \"108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803\": container with ID starting with 108db1fec9a298f522d0e0f79b09bd06a744d18d95dbb9d71ec930d2e6219803 not found: ID does not exist" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.673538 4932 scope.go:117] "RemoveContainer" containerID="db0ea50c8427ab2e8b2a2c084da148d95af6ef9695f74ad8de595a1994435880" Nov 25 11:01:08 crc kubenswrapper[4932]: E1125 11:01:08.674770 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db0ea50c8427ab2e8b2a2c084da148d95af6ef9695f74ad8de595a1994435880\": container with ID starting with db0ea50c8427ab2e8b2a2c084da148d95af6ef9695f74ad8de595a1994435880 not found: ID does not exist" containerID="db0ea50c8427ab2e8b2a2c084da148d95af6ef9695f74ad8de595a1994435880" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.674844 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db0ea50c8427ab2e8b2a2c084da148d95af6ef9695f74ad8de595a1994435880"} err="failed to get container status \"db0ea50c8427ab2e8b2a2c084da148d95af6ef9695f74ad8de595a1994435880\": rpc error: code = NotFound desc = could not find container \"db0ea50c8427ab2e8b2a2c084da148d95af6ef9695f74ad8de595a1994435880\": container with ID starting with db0ea50c8427ab2e8b2a2c084da148d95af6ef9695f74ad8de595a1994435880 not found: ID does not exist" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 
11:01:08.674884 4932 scope.go:117] "RemoveContainer" containerID="353bad96f0e42457b6ec716bc61e7e77a8bd06499448c53cea531f90d320b107" Nov 25 11:01:08 crc kubenswrapper[4932]: E1125 11:01:08.675804 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"353bad96f0e42457b6ec716bc61e7e77a8bd06499448c53cea531f90d320b107\": container with ID starting with 353bad96f0e42457b6ec716bc61e7e77a8bd06499448c53cea531f90d320b107 not found: ID does not exist" containerID="353bad96f0e42457b6ec716bc61e7e77a8bd06499448c53cea531f90d320b107" Nov 25 11:01:08 crc kubenswrapper[4932]: I1125 11:01:08.675882 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"353bad96f0e42457b6ec716bc61e7e77a8bd06499448c53cea531f90d320b107"} err="failed to get container status \"353bad96f0e42457b6ec716bc61e7e77a8bd06499448c53cea531f90d320b107\": rpc error: code = NotFound desc = could not find container \"353bad96f0e42457b6ec716bc61e7e77a8bd06499448c53cea531f90d320b107\": container with ID starting with 353bad96f0e42457b6ec716bc61e7e77a8bd06499448c53cea531f90d320b107 not found: ID does not exist" Nov 25 11:01:13 crc kubenswrapper[4932]: I1125 11:01:13.606461 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 11:01:13 crc kubenswrapper[4932]: E1125 11:01:13.607274 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:01:23 crc kubenswrapper[4932]: I1125 11:01:23.688883 4932 generic.go:334] "Generic (PLEG): container finished" podID="e9e3f368-256c-4720-8d71-b0fb2a773e9c" containerID="de1059514acd8c3b2c33a921dce98438204c664ff3f43bd43ba240cf075e8bbd" exitCode=0 Nov 25 11:01:23 crc kubenswrapper[4932]: I1125 11:01:23.688958 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-rtf2t" event={"ID":"e9e3f368-256c-4720-8d71-b0fb2a773e9c","Type":"ContainerDied","Data":"de1059514acd8c3b2c33a921dce98438204c664ff3f43bd43ba240cf075e8bbd"} Nov 25 11:01:24 crc kubenswrapper[4932]: I1125 11:01:24.607168 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 11:01:24 crc kubenswrapper[4932]: E1125 11:01:24.607543 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.129294 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.200690 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ssh-key\") pod \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.200819 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-inventory\") pod \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.200900 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ovncontroller-config-0\") pod \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.200943 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htgmh\" (UniqueName: \"kubernetes.io/projected/e9e3f368-256c-4720-8d71-b0fb2a773e9c-kube-api-access-htgmh\") pod \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.201001 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ovn-combined-ca-bundle\") pod \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\" (UID: \"e9e3f368-256c-4720-8d71-b0fb2a773e9c\") " Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.206507 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "e9e3f368-256c-4720-8d71-b0fb2a773e9c" (UID: "e9e3f368-256c-4720-8d71-b0fb2a773e9c"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.206744 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9e3f368-256c-4720-8d71-b0fb2a773e9c-kube-api-access-htgmh" (OuterVolumeSpecName: "kube-api-access-htgmh") pod "e9e3f368-256c-4720-8d71-b0fb2a773e9c" (UID: "e9e3f368-256c-4720-8d71-b0fb2a773e9c"). InnerVolumeSpecName "kube-api-access-htgmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.236314 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "e9e3f368-256c-4720-8d71-b0fb2a773e9c" (UID: "e9e3f368-256c-4720-8d71-b0fb2a773e9c"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.238692 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-inventory" (OuterVolumeSpecName: "inventory") pod "e9e3f368-256c-4720-8d71-b0fb2a773e9c" (UID: "e9e3f368-256c-4720-8d71-b0fb2a773e9c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.239140 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e9e3f368-256c-4720-8d71-b0fb2a773e9c" (UID: "e9e3f368-256c-4720-8d71-b0fb2a773e9c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.303691 4932 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.303993 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.304081 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e9e3f368-256c-4720-8d71-b0fb2a773e9c-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.304137 4932 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e9e3f368-256c-4720-8d71-b0fb2a773e9c-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.304205 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htgmh\" (UniqueName: \"kubernetes.io/projected/e9e3f368-256c-4720-8d71-b0fb2a773e9c-kube-api-access-htgmh\") on node \"crc\" DevicePath \"\"" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.710137 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-rtf2t" event={"ID":"e9e3f368-256c-4720-8d71-b0fb2a773e9c","Type":"ContainerDied","Data":"67b5f525da29424180658f939236554948826f52783398325a6af64177ce6376"} Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.710176 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="67b5f525da29424180658f939236554948826f52783398325a6af64177ce6376" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.710212 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-rtf2t" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.803387 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-8d8xc"] Nov 25 11:01:25 crc kubenswrapper[4932]: E1125 11:01:25.803890 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d6498a0-2d33-4224-911d-999c4bf17ada" containerName="extract-utilities" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.803908 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d6498a0-2d33-4224-911d-999c4bf17ada" containerName="extract-utilities" Nov 25 11:01:25 crc kubenswrapper[4932]: E1125 11:01:25.803930 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5750b87e-5526-43dc-9adb-c625b223a356" containerName="keystone-cron" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.803937 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="5750b87e-5526-43dc-9adb-c625b223a356" containerName="keystone-cron" Nov 25 11:01:25 crc kubenswrapper[4932]: E1125 11:01:25.803975 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9e3f368-256c-4720-8d71-b0fb2a773e9c" containerName="ovn-openstack-openstack-cell1" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.803987 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9e3f368-256c-4720-8d71-b0fb2a773e9c" containerName="ovn-openstack-openstack-cell1" Nov 25 11:01:25 crc kubenswrapper[4932]: E1125 11:01:25.804021 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d6498a0-2d33-4224-911d-999c4bf17ada" containerName="extract-content" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.804029 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d6498a0-2d33-4224-911d-999c4bf17ada" containerName="extract-content" Nov 25 11:01:25 crc kubenswrapper[4932]: E1125 11:01:25.804049 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d6498a0-2d33-4224-911d-999c4bf17ada" containerName="registry-server" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.804056 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d6498a0-2d33-4224-911d-999c4bf17ada" containerName="registry-server" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.804324 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="5750b87e-5526-43dc-9adb-c625b223a356" containerName="keystone-cron" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.804357 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d6498a0-2d33-4224-911d-999c4bf17ada" containerName="registry-server" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.804377 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9e3f368-256c-4720-8d71-b0fb2a773e9c" containerName="ovn-openstack-openstack-cell1" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.805278 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.811576 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.811878 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.811982 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.812178 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.812406 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.812567 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.820209 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-8d8xc"] Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.936101 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.936217 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.936403 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.936614 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvmzh\" (UniqueName: \"kubernetes.io/projected/c749bf16-f867-4742-824c-703b7220d49c-kube-api-access-rvmzh\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.936677 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: 
\"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:25 crc kubenswrapper[4932]: I1125 11:01:25.936715 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.038229 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.038999 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.039104 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.039132 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.039583 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.039781 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvmzh\" (UniqueName: \"kubernetes.io/projected/c749bf16-f867-4742-824c-703b7220d49c-kube-api-access-rvmzh\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.044964 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.045701 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.045955 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.046258 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.053980 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.060180 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvmzh\" (UniqueName: \"kubernetes.io/projected/c749bf16-f867-4742-824c-703b7220d49c-kube-api-access-rvmzh\") pod \"neutron-metadata-openstack-openstack-cell1-8d8xc\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.149599 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.690804 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-8d8xc"] Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.700672 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 11:01:26 crc kubenswrapper[4932]: I1125 11:01:26.738377 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" event={"ID":"c749bf16-f867-4742-824c-703b7220d49c","Type":"ContainerStarted","Data":"d634b421fa652302600bd1a5c05b94a31c876b3ebc489118d3ffbed513575c67"} Nov 25 11:01:27 crc kubenswrapper[4932]: I1125 11:01:27.748817 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" event={"ID":"c749bf16-f867-4742-824c-703b7220d49c","Type":"ContainerStarted","Data":"ad1b128a8466f6003932ff240d14a4eb79a702027ec33fb77d43fc49bdbc5b15"} Nov 25 11:01:27 crc kubenswrapper[4932]: I1125 11:01:27.768133 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" podStartSLOduration=2.307640122 podStartE2EDuration="2.768115672s" podCreationTimestamp="2025-11-25 11:01:25 +0000 UTC" firstStartedPulling="2025-11-25 11:01:26.69960881 +0000 UTC m=+7946.825638373" lastFinishedPulling="2025-11-25 11:01:27.16008436 +0000 UTC m=+7947.286113923" observedRunningTime="2025-11-25 11:01:27.764414296 +0000 UTC m=+7947.890443869" watchObservedRunningTime="2025-11-25 11:01:27.768115672 +0000 UTC m=+7947.894145235" Nov 25 11:01:39 crc kubenswrapper[4932]: I1125 11:01:39.606350 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 11:01:39 crc kubenswrapper[4932]: E1125 11:01:39.607479 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:01:50 crc kubenswrapper[4932]: I1125 11:01:50.615612 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 11:01:50 crc kubenswrapper[4932]: E1125 11:01:50.616654 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:02:05 crc kubenswrapper[4932]: I1125 11:02:05.607278 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 11:02:05 crc kubenswrapper[4932]: E1125 11:02:05.608262 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:02:18 crc kubenswrapper[4932]: I1125 11:02:18.606169 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b" Nov 25 11:02:18 crc kubenswrapper[4932]: E1125 11:02:18.607012 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:02:19 crc kubenswrapper[4932]: I1125 11:02:19.239948 4932 generic.go:334] "Generic (PLEG): container finished" podID="c749bf16-f867-4742-824c-703b7220d49c" containerID="ad1b128a8466f6003932ff240d14a4eb79a702027ec33fb77d43fc49bdbc5b15" exitCode=0 Nov 25 11:02:19 crc kubenswrapper[4932]: I1125 11:02:19.239996 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" event={"ID":"c749bf16-f867-4742-824c-703b7220d49c","Type":"ContainerDied","Data":"ad1b128a8466f6003932ff240d14a4eb79a702027ec33fb77d43fc49bdbc5b15"} Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.715120 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.829934 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-neutron-metadata-combined-ca-bundle\") pod \"c749bf16-f867-4742-824c-703b7220d49c\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.830074 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvmzh\" (UniqueName: \"kubernetes.io/projected/c749bf16-f867-4742-824c-703b7220d49c-kube-api-access-rvmzh\") pod \"c749bf16-f867-4742-824c-703b7220d49c\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.830124 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-neutron-ovn-metadata-agent-neutron-config-0\") pod \"c749bf16-f867-4742-824c-703b7220d49c\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.830373 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-nova-metadata-neutron-config-0\") pod \"c749bf16-f867-4742-824c-703b7220d49c\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.830532 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-inventory\") pod \"c749bf16-f867-4742-824c-703b7220d49c\" (UID: 
\"c749bf16-f867-4742-824c-703b7220d49c\") " Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.830569 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-ssh-key\") pod \"c749bf16-f867-4742-824c-703b7220d49c\" (UID: \"c749bf16-f867-4742-824c-703b7220d49c\") " Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.838567 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c749bf16-f867-4742-824c-703b7220d49c-kube-api-access-rvmzh" (OuterVolumeSpecName: "kube-api-access-rvmzh") pod "c749bf16-f867-4742-824c-703b7220d49c" (UID: "c749bf16-f867-4742-824c-703b7220d49c"). InnerVolumeSpecName "kube-api-access-rvmzh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.840972 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "c749bf16-f867-4742-824c-703b7220d49c" (UID: "c749bf16-f867-4742-824c-703b7220d49c"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.864275 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "c749bf16-f867-4742-824c-703b7220d49c" (UID: "c749bf16-f867-4742-824c-703b7220d49c"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.868711 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "c749bf16-f867-4742-824c-703b7220d49c" (UID: "c749bf16-f867-4742-824c-703b7220d49c"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.870817 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-inventory" (OuterVolumeSpecName: "inventory") pod "c749bf16-f867-4742-824c-703b7220d49c" (UID: "c749bf16-f867-4742-824c-703b7220d49c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.872604 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c749bf16-f867-4742-824c-703b7220d49c" (UID: "c749bf16-f867-4742-824c-703b7220d49c"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.932948 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.933383 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.933494 4932 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.933579 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvmzh\" (UniqueName: \"kubernetes.io/projected/c749bf16-f867-4742-824c-703b7220d49c-kube-api-access-rvmzh\") on node \"crc\" DevicePath \"\"" Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.933660 4932 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 11:02:20 crc kubenswrapper[4932]: I1125 11:02:20.933738 4932 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c749bf16-f867-4742-824c-703b7220d49c-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.262821 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" event={"ID":"c749bf16-f867-4742-824c-703b7220d49c","Type":"ContainerDied","Data":"d634b421fa652302600bd1a5c05b94a31c876b3ebc489118d3ffbed513575c67"} Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.262868 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d634b421fa652302600bd1a5c05b94a31c876b3ebc489118d3ffbed513575c67" Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.262930 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-8d8xc" Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.343458 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-h7fl6"] Nov 25 11:02:21 crc kubenswrapper[4932]: E1125 11:02:21.344003 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c749bf16-f867-4742-824c-703b7220d49c" containerName="neutron-metadata-openstack-openstack-cell1" Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.344030 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c749bf16-f867-4742-824c-703b7220d49c" containerName="neutron-metadata-openstack-openstack-cell1" Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.344356 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c749bf16-f867-4742-824c-703b7220d49c" containerName="neutron-metadata-openstack-openstack-cell1" Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.345441 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.347996 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.348248 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.349354 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.349357 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.349763 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.357513 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-h7fl6"]
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.444402 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.445076 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-inventory\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.445255 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.445312 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mftx\" (UniqueName: \"kubernetes.io/projected/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-kube-api-access-4mftx\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.445365 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-ssh-key\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.547027 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.547076 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-inventory\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.547131 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.547350 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mftx\" (UniqueName: \"kubernetes.io/projected/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-kube-api-access-4mftx\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.548127 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-ssh-key\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.552860 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.552870 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-inventory\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.562972 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.563056 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-ssh-key\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.566488 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mftx\" (UniqueName: \"kubernetes.io/projected/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-kube-api-access-4mftx\") pod \"libvirt-openstack-openstack-cell1-h7fl6\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") " pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:21 crc kubenswrapper[4932]: I1125 11:02:21.678828 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:02:22 crc kubenswrapper[4932]: I1125 11:02:22.175451 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-h7fl6"]
Nov 25 11:02:22 crc kubenswrapper[4932]: I1125 11:02:22.272537 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-h7fl6" event={"ID":"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92","Type":"ContainerStarted","Data":"69e7c4a4b9b72efd7e5cfdd81caac16d4e9f138671a21cd98a9b3a8b0c1b388b"}
Nov 25 11:02:23 crc kubenswrapper[4932]: I1125 11:02:23.283272 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-h7fl6" event={"ID":"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92","Type":"ContainerStarted","Data":"5adec265080ed5824cba2238942f0f05a414a3b6b8c9e353ed61bdefda6aabbd"}
Nov 25 11:02:32 crc kubenswrapper[4932]: I1125 11:02:32.606701 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b"
Nov 25 11:02:32 crc kubenswrapper[4932]: E1125 11:02:32.608594 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:02:45 crc kubenswrapper[4932]: I1125 11:02:45.606581 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b"
Nov 25 11:02:45 crc kubenswrapper[4932]: E1125 11:02:45.607631 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:02:59 crc kubenswrapper[4932]: I1125 11:02:59.606603 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b"
Nov 25 11:02:59 crc kubenswrapper[4932]: E1125 11:02:59.607621 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:03:11 crc kubenswrapper[4932]: I1125 11:03:11.606423 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b"
Nov 25 11:03:11 crc kubenswrapper[4932]: E1125 11:03:11.607347 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:03:25 crc kubenswrapper[4932]: I1125 11:03:25.607070 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b"
Nov 25 11:03:25 crc kubenswrapper[4932]: E1125 11:03:25.607845 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:03:37 crc kubenswrapper[4932]: I1125 11:03:37.606834 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b"
Nov 25 11:03:38 crc kubenswrapper[4932]: I1125 11:03:38.028719 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"5d323c78ea3c7096bec61f602d4c0a496b960f52b18bce2aa7e7e84b920a39a7"}
Nov 25 11:03:38 crc kubenswrapper[4932]: I1125 11:03:38.053096 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-openstack-openstack-cell1-h7fl6" podStartSLOduration=76.504426286 podStartE2EDuration="1m17.053076795s" podCreationTimestamp="2025-11-25 11:02:21 +0000 UTC" firstStartedPulling="2025-11-25 11:02:22.176944091 +0000 UTC m=+8002.302973654" lastFinishedPulling="2025-11-25 11:02:22.7255946 +0000 UTC m=+8002.851624163" observedRunningTime="2025-11-25 11:02:23.301742579 +0000 UTC m=+8003.427772152" watchObservedRunningTime="2025-11-25 11:03:38.053076795 +0000 UTC m=+8078.179106358"
Nov 25 11:05:37 crc kubenswrapper[4932]: I1125 11:05:37.181380 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 11:05:37 crc kubenswrapper[4932]: I1125 11:05:37.181839 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 11:06:07 crc kubenswrapper[4932]: I1125 11:06:07.180889 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 11:06:07 crc kubenswrapper[4932]: I1125 11:06:07.181564 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.199053 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jd2t7"]
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.202416 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.220995 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jd2t7"]
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.303389 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/916996ba-f712-493a-b4cb-185f7cbcdc4f-utilities\") pod \"community-operators-jd2t7\" (UID: \"916996ba-f712-493a-b4cb-185f7cbcdc4f\") " pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.303699 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zcb7\" (UniqueName: \"kubernetes.io/projected/916996ba-f712-493a-b4cb-185f7cbcdc4f-kube-api-access-7zcb7\") pod \"community-operators-jd2t7\" (UID: \"916996ba-f712-493a-b4cb-185f7cbcdc4f\") " pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.304030 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/916996ba-f712-493a-b4cb-185f7cbcdc4f-catalog-content\") pod \"community-operators-jd2t7\" (UID: \"916996ba-f712-493a-b4cb-185f7cbcdc4f\") " pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.406394 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/916996ba-f712-493a-b4cb-185f7cbcdc4f-utilities\") pod \"community-operators-jd2t7\" (UID: \"916996ba-f712-493a-b4cb-185f7cbcdc4f\") " pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.406544 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zcb7\" (UniqueName: \"kubernetes.io/projected/916996ba-f712-493a-b4cb-185f7cbcdc4f-kube-api-access-7zcb7\") pod \"community-operators-jd2t7\" (UID: \"916996ba-f712-493a-b4cb-185f7cbcdc4f\") " pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.406676 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/916996ba-f712-493a-b4cb-185f7cbcdc4f-catalog-content\") pod \"community-operators-jd2t7\" (UID: \"916996ba-f712-493a-b4cb-185f7cbcdc4f\") " pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.407244 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/916996ba-f712-493a-b4cb-185f7cbcdc4f-utilities\") pod \"community-operators-jd2t7\" (UID: \"916996ba-f712-493a-b4cb-185f7cbcdc4f\") " pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.407359 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/916996ba-f712-493a-b4cb-185f7cbcdc4f-catalog-content\") pod \"community-operators-jd2t7\" (UID: \"916996ba-f712-493a-b4cb-185f7cbcdc4f\") " pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.434855 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zcb7\" (UniqueName: \"kubernetes.io/projected/916996ba-f712-493a-b4cb-185f7cbcdc4f-kube-api-access-7zcb7\") pod \"community-operators-jd2t7\" (UID: \"916996ba-f712-493a-b4cb-185f7cbcdc4f\") " pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:25 crc kubenswrapper[4932]: I1125 11:06:25.530532 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:26 crc kubenswrapper[4932]: I1125 11:06:26.095926 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jd2t7"]
Nov 25 11:06:26 crc kubenswrapper[4932]: I1125 11:06:26.784059 4932 generic.go:334] "Generic (PLEG): container finished" podID="916996ba-f712-493a-b4cb-185f7cbcdc4f" containerID="f492f89ac5273c922d6f01048ef7dae5fe0422c8e5882a8b0998067a367878e1" exitCode=0
Nov 25 11:06:26 crc kubenswrapper[4932]: I1125 11:06:26.784177 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jd2t7" event={"ID":"916996ba-f712-493a-b4cb-185f7cbcdc4f","Type":"ContainerDied","Data":"f492f89ac5273c922d6f01048ef7dae5fe0422c8e5882a8b0998067a367878e1"}
Nov 25 11:06:26 crc kubenswrapper[4932]: I1125 11:06:26.784465 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jd2t7" event={"ID":"916996ba-f712-493a-b4cb-185f7cbcdc4f","Type":"ContainerStarted","Data":"c2cc0e1bfc4cd475b15a47d310c07baaa772cf2821b519060807a76c2f979939"}
Nov 25 11:06:26 crc kubenswrapper[4932]: I1125 11:06:26.788302 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 25 11:06:28 crc kubenswrapper[4932]: I1125 11:06:28.812708 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jd2t7" event={"ID":"916996ba-f712-493a-b4cb-185f7cbcdc4f","Type":"ContainerStarted","Data":"49e1471e7826e06239156d41482372c4160a1d13e26778a1fe892001b4b04121"}
Nov 25 11:06:29 crc kubenswrapper[4932]: I1125 11:06:29.844453 4932 generic.go:334] "Generic (PLEG): container finished" podID="916996ba-f712-493a-b4cb-185f7cbcdc4f" containerID="49e1471e7826e06239156d41482372c4160a1d13e26778a1fe892001b4b04121" exitCode=0
Nov 25 11:06:29 crc kubenswrapper[4932]: I1125 11:06:29.845666 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jd2t7" event={"ID":"916996ba-f712-493a-b4cb-185f7cbcdc4f","Type":"ContainerDied","Data":"49e1471e7826e06239156d41482372c4160a1d13e26778a1fe892001b4b04121"}
Nov 25 11:06:30 crc kubenswrapper[4932]: I1125 11:06:30.860740 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jd2t7" event={"ID":"916996ba-f712-493a-b4cb-185f7cbcdc4f","Type":"ContainerStarted","Data":"622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71"}
Nov 25 11:06:30 crc kubenswrapper[4932]: I1125 11:06:30.879979 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jd2t7" podStartSLOduration=2.369560928 podStartE2EDuration="5.879961507s" podCreationTimestamp="2025-11-25 11:06:25 +0000 UTC" firstStartedPulling="2025-11-25 11:06:26.787780433 +0000 UTC m=+8246.913809996" lastFinishedPulling="2025-11-25 11:06:30.298181012 +0000 UTC m=+8250.424210575" observedRunningTime="2025-11-25 11:06:30.879771512 +0000 UTC m=+8251.005801095" watchObservedRunningTime="2025-11-25 11:06:30.879961507 +0000 UTC m=+8251.005991070"
Nov 25 11:06:35 crc kubenswrapper[4932]: I1125 11:06:35.531088 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:35 crc kubenswrapper[4932]: I1125 11:06:35.531580 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:35 crc kubenswrapper[4932]: I1125 11:06:35.586407 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:35 crc kubenswrapper[4932]: I1125 11:06:35.965283 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:36 crc kubenswrapper[4932]: I1125 11:06:36.011959 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jd2t7"]
Nov 25 11:06:37 crc kubenswrapper[4932]: I1125 11:06:37.180640 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 11:06:37 crc kubenswrapper[4932]: I1125 11:06:37.180977 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 11:06:37 crc kubenswrapper[4932]: I1125 11:06:37.181029 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh"
Nov 25 11:06:37 crc kubenswrapper[4932]: I1125 11:06:37.181932 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5d323c78ea3c7096bec61f602d4c0a496b960f52b18bce2aa7e7e84b920a39a7"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 11:06:37 crc kubenswrapper[4932]: I1125 11:06:37.181988 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://5d323c78ea3c7096bec61f602d4c0a496b960f52b18bce2aa7e7e84b920a39a7" gracePeriod=600
Nov 25 11:06:37 crc kubenswrapper[4932]: I1125 11:06:37.936259 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="5d323c78ea3c7096bec61f602d4c0a496b960f52b18bce2aa7e7e84b920a39a7" exitCode=0
Nov 25 11:06:37 crc kubenswrapper[4932]: I1125 11:06:37.936367 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"5d323c78ea3c7096bec61f602d4c0a496b960f52b18bce2aa7e7e84b920a39a7"}
Nov 25 11:06:37 crc kubenswrapper[4932]: I1125 11:06:37.936676 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"}
Nov 25 11:06:37 crc kubenswrapper[4932]: I1125 11:06:37.936709 4932 scope.go:117] "RemoveContainer" containerID="3324f47b1db27f97b82fa79f8b61c36ab922e4c5e6bc1284daa10f80ff25283b"
Nov 25 11:06:37 crc kubenswrapper[4932]: I1125 11:06:37.936798 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jd2t7" podUID="916996ba-f712-493a-b4cb-185f7cbcdc4f" containerName="registry-server" containerID="cri-o://622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71" gracePeriod=2
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.397897 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.527155 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/916996ba-f712-493a-b4cb-185f7cbcdc4f-catalog-content\") pod \"916996ba-f712-493a-b4cb-185f7cbcdc4f\" (UID: \"916996ba-f712-493a-b4cb-185f7cbcdc4f\") "
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.527474 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/916996ba-f712-493a-b4cb-185f7cbcdc4f-utilities\") pod \"916996ba-f712-493a-b4cb-185f7cbcdc4f\" (UID: \"916996ba-f712-493a-b4cb-185f7cbcdc4f\") "
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.527534 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zcb7\" (UniqueName: \"kubernetes.io/projected/916996ba-f712-493a-b4cb-185f7cbcdc4f-kube-api-access-7zcb7\") pod \"916996ba-f712-493a-b4cb-185f7cbcdc4f\" (UID: \"916996ba-f712-493a-b4cb-185f7cbcdc4f\") "
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.528048 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/916996ba-f712-493a-b4cb-185f7cbcdc4f-utilities" (OuterVolumeSpecName: "utilities") pod "916996ba-f712-493a-b4cb-185f7cbcdc4f" (UID: "916996ba-f712-493a-b4cb-185f7cbcdc4f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.532464 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/916996ba-f712-493a-b4cb-185f7cbcdc4f-kube-api-access-7zcb7" (OuterVolumeSpecName: "kube-api-access-7zcb7") pod "916996ba-f712-493a-b4cb-185f7cbcdc4f" (UID: "916996ba-f712-493a-b4cb-185f7cbcdc4f"). InnerVolumeSpecName "kube-api-access-7zcb7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.583320 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/916996ba-f712-493a-b4cb-185f7cbcdc4f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "916996ba-f712-493a-b4cb-185f7cbcdc4f" (UID: "916996ba-f712-493a-b4cb-185f7cbcdc4f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.629898 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/916996ba-f712-493a-b4cb-185f7cbcdc4f-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.629946 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zcb7\" (UniqueName: \"kubernetes.io/projected/916996ba-f712-493a-b4cb-185f7cbcdc4f-kube-api-access-7zcb7\") on node \"crc\" DevicePath \"\""
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.629963 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/916996ba-f712-493a-b4cb-185f7cbcdc4f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.971339 4932 generic.go:334] "Generic (PLEG): container finished" podID="916996ba-f712-493a-b4cb-185f7cbcdc4f" containerID="622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71" exitCode=0
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.971422 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jd2t7"
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.971422 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jd2t7" event={"ID":"916996ba-f712-493a-b4cb-185f7cbcdc4f","Type":"ContainerDied","Data":"622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71"}
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.972299 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jd2t7" event={"ID":"916996ba-f712-493a-b4cb-185f7cbcdc4f","Type":"ContainerDied","Data":"c2cc0e1bfc4cd475b15a47d310c07baaa772cf2821b519060807a76c2f979939"}
Nov 25 11:06:38 crc kubenswrapper[4932]: I1125 11:06:38.972326 4932 scope.go:117] "RemoveContainer" containerID="622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71"
Nov 25 11:06:39 crc kubenswrapper[4932]: I1125 11:06:39.000641 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jd2t7"]
Nov 25 11:06:39 crc kubenswrapper[4932]: I1125 11:06:39.009225 4932 scope.go:117] "RemoveContainer" containerID="49e1471e7826e06239156d41482372c4160a1d13e26778a1fe892001b4b04121"
Nov 25 11:06:39 crc kubenswrapper[4932]: I1125 11:06:39.009533 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jd2t7"]
Nov 25 11:06:39 crc kubenswrapper[4932]: I1125 11:06:39.032815 4932 scope.go:117] "RemoveContainer" containerID="f492f89ac5273c922d6f01048ef7dae5fe0422c8e5882a8b0998067a367878e1"
Nov 25 11:06:39 crc kubenswrapper[4932]: I1125 11:06:39.113841 4932 scope.go:117] "RemoveContainer" containerID="622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71"
Nov 25 11:06:39 crc kubenswrapper[4932]: E1125 11:06:39.114330 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71\": container with ID starting with 622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71 not found: ID does not exist" containerID="622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71"
Nov 25 11:06:39 crc kubenswrapper[4932]: I1125 11:06:39.114396 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71"} err="failed to get container status \"622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71\": rpc error: code = NotFound desc = could not find container \"622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71\": container with ID starting with 622c9c4864a185fc8be72cd28213985fd63b1d87b9d9b14712847627f16b3e71 not found: ID does not exist"
Nov 25 11:06:39 crc kubenswrapper[4932]: I1125 11:06:39.114431 4932 scope.go:117] "RemoveContainer" containerID="49e1471e7826e06239156d41482372c4160a1d13e26778a1fe892001b4b04121"
Nov 25 11:06:39 crc kubenswrapper[4932]: E1125 11:06:39.114741 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49e1471e7826e06239156d41482372c4160a1d13e26778a1fe892001b4b04121\": container with ID starting with 49e1471e7826e06239156d41482372c4160a1d13e26778a1fe892001b4b04121 not found: ID does not exist" containerID="49e1471e7826e06239156d41482372c4160a1d13e26778a1fe892001b4b04121"
Nov 25 11:06:39 crc kubenswrapper[4932]: I1125 11:06:39.114781 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49e1471e7826e06239156d41482372c4160a1d13e26778a1fe892001b4b04121"} err="failed to get container status \"49e1471e7826e06239156d41482372c4160a1d13e26778a1fe892001b4b04121\": rpc error: code = NotFound desc = could not find container \"49e1471e7826e06239156d41482372c4160a1d13e26778a1fe892001b4b04121\": container with ID starting with 49e1471e7826e06239156d41482372c4160a1d13e26778a1fe892001b4b04121 not found: ID does not exist"
Nov 25 11:06:39 crc kubenswrapper[4932]: I1125 11:06:39.114807 4932 scope.go:117] "RemoveContainer" containerID="f492f89ac5273c922d6f01048ef7dae5fe0422c8e5882a8b0998067a367878e1"
Nov 25 11:06:39 crc kubenswrapper[4932]: E1125 11:06:39.115041 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f492f89ac5273c922d6f01048ef7dae5fe0422c8e5882a8b0998067a367878e1\": container with ID starting with f492f89ac5273c922d6f01048ef7dae5fe0422c8e5882a8b0998067a367878e1 not found: ID does not exist" containerID="f492f89ac5273c922d6f01048ef7dae5fe0422c8e5882a8b0998067a367878e1"
Nov 25 11:06:39 crc kubenswrapper[4932]: I1125 11:06:39.115075 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f492f89ac5273c922d6f01048ef7dae5fe0422c8e5882a8b0998067a367878e1"} err="failed to get container status \"f492f89ac5273c922d6f01048ef7dae5fe0422c8e5882a8b0998067a367878e1\": rpc error: code = NotFound desc = could not find container \"f492f89ac5273c922d6f01048ef7dae5fe0422c8e5882a8b0998067a367878e1\": container with ID starting with f492f89ac5273c922d6f01048ef7dae5fe0422c8e5882a8b0998067a367878e1 not found: ID does not exist"
Nov 25 11:06:40 crc kubenswrapper[4932]: I1125 11:06:40.625638 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="916996ba-f712-493a-b4cb-185f7cbcdc4f" path="/var/lib/kubelet/pods/916996ba-f712-493a-b4cb-185f7cbcdc4f/volumes"
Nov 25 11:06:59 crc kubenswrapper[4932]: I1125 11:06:59.164486 4932 generic.go:334] "Generic (PLEG): container finished" podID="ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92" containerID="5adec265080ed5824cba2238942f0f05a414a3b6b8c9e353ed61bdefda6aabbd" exitCode=0
Nov 25 11:06:59 crc kubenswrapper[4932]: I1125 11:06:59.164579 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-h7fl6" event={"ID":"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92","Type":"ContainerDied","Data":"5adec265080ed5824cba2238942f0f05a414a3b6b8c9e353ed61bdefda6aabbd"}
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.653148 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.728485 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-ssh-key\") pod \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") "
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.728563 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-inventory\") pod \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") "
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.728671 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mftx\" (UniqueName: \"kubernetes.io/projected/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-kube-api-access-4mftx\") pod \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") "
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.728805 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-libvirt-secret-0\") pod \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") "
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.728857 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-libvirt-combined-ca-bundle\") pod \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\" (UID: \"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92\") "
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.735115 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92" (UID: "ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.735144 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-kube-api-access-4mftx" (OuterVolumeSpecName: "kube-api-access-4mftx") pod "ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92" (UID: "ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92"). InnerVolumeSpecName "kube-api-access-4mftx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.760114 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92" (UID: "ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.766331 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-inventory" (OuterVolumeSpecName: "inventory") pod "ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92" (UID: "ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.766810 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92" (UID: "ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.831970 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.832015 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.832030 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4mftx\" (UniqueName: \"kubernetes.io/projected/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-kube-api-access-4mftx\") on node \"crc\" DevicePath \"\""
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.832048 4932 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-libvirt-secret-0\") on node \"crc\" DevicePath \"\""
Nov 25 11:07:00 crc kubenswrapper[4932]: I1125 11:07:00.832059 4932 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.186537 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-h7fl6" event={"ID":"ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92","Type":"ContainerDied","Data":"69e7c4a4b9b72efd7e5cfdd81caac16d4e9f138671a21cd98a9b3a8b0c1b388b"}
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.186592 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69e7c4a4b9b72efd7e5cfdd81caac16d4e9f138671a21cd98a9b3a8b0c1b388b"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.186655 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-h7fl6"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.283205 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-jqjhl"]
Nov 25 11:07:01 crc kubenswrapper[4932]: E1125 11:07:01.283635 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92" containerName="libvirt-openstack-openstack-cell1"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.283652 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92" containerName="libvirt-openstack-openstack-cell1"
Nov 25 11:07:01 crc kubenswrapper[4932]: E1125 11:07:01.283686 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="916996ba-f712-493a-b4cb-185f7cbcdc4f" containerName="extract-content"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.283695 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="916996ba-f712-493a-b4cb-185f7cbcdc4f" containerName="extract-content"
Nov 25 11:07:01 crc kubenswrapper[4932]: E1125 11:07:01.283707 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="916996ba-f712-493a-b4cb-185f7cbcdc4f" containerName="extract-utilities"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.283714 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="916996ba-f712-493a-b4cb-185f7cbcdc4f" containerName="extract-utilities"
Nov 25 11:07:01 crc kubenswrapper[4932]: E1125 11:07:01.283730 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="916996ba-f712-493a-b4cb-185f7cbcdc4f" containerName="registry-server"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.283736 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="916996ba-f712-493a-b4cb-185f7cbcdc4f" containerName="registry-server"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.283941 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="916996ba-f712-493a-b4cb-185f7cbcdc4f" containerName="registry-server"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.283969 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca2d3ba3-c99c-4b62-acba-9eb2e5f13d92" containerName="libvirt-openstack-openstack-cell1"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.284750 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.291397 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.294077 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.294282 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.294302 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.294344 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.294405 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.294300 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.312149 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-jqjhl"]
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.343358 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.343547 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqtpw\" (UniqueName: \"kubernetes.io/projected/8bedcf24-a05b-4934-b6ae-0042dc38673a-kube-api-access-tqtpw\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.343591 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.343622 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.343647 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.343681 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.343890 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.343947 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-inventory\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.343999 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.446681 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.447834 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.447934 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-inventory\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.448027 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.448321 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.448625 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqtpw\" (UniqueName: \"kubernetes.io/projected/8bedcf24-a05b-4934-b6ae-0042dc38673a-kube-api-access-tqtpw\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.448724 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.448836 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.448972 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.450017 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.452265 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.452389 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.452745 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-inventory\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.453068 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.453587 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.453745 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.454309 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.467771 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqtpw\" (UniqueName: \"kubernetes.io/projected/8bedcf24-a05b-4934-b6ae-0042dc38673a-kube-api-access-tqtpw\") pod \"nova-cell1-openstack-openstack-cell1-jqjhl\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:01 crc kubenswrapper[4932]: I1125 11:07:01.606214 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl"
Nov 25 11:07:02 crc kubenswrapper[4932]: I1125 11:07:02.133329 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-jqjhl"]
Nov 25 11:07:02 crc kubenswrapper[4932]: I1125 11:07:02.199484 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl" event={"ID":"8bedcf24-a05b-4934-b6ae-0042dc38673a","Type":"ContainerStarted","Data":"ba17102bc62e4c9ee8d3734c58637953a3f238822a6852e5a7fc85c928ee9da0"}
Nov 25 11:07:03 crc kubenswrapper[4932]: I1125 11:07:03.210924 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl" event={"ID":"8bedcf24-a05b-4934-b6ae-0042dc38673a","Type":"ContainerStarted","Data":"cadb3867161bc571a5d136845ea932fb2d3b6be53d9606251647d0b56c63d8b3"}
Nov 25 11:07:03 crc kubenswrapper[4932]: I1125 11:07:03.236906 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl" podStartSLOduration=1.696528161 podStartE2EDuration="2.236883956s" podCreationTimestamp="2025-11-25 11:07:01 +0000 UTC" firstStartedPulling="2025-11-25 11:07:02.145702186 +0000 UTC m=+8282.271731749" lastFinishedPulling="2025-11-25 11:07:02.686057981 +0000 UTC m=+8282.812087544" observedRunningTime="2025-11-25 11:07:03.230202084 +0000 UTC m=+8283.356231647" watchObservedRunningTime="2025-11-25 11:07:03.236883956 +0000 UTC m=+8283.362913519"
Nov 25 11:08:37 crc kubenswrapper[4932]: I1125 11:08:37.181473 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 11:08:37 crc kubenswrapper[4932]: I1125 11:08:37.181921 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.383639 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ll9j5"]
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.390165 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.400049 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ll9j5"]
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.463084 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e6a574-399b-4de8-a421-8dbf33383e6e-utilities\") pod \"certified-operators-ll9j5\" (UID: \"c9e6a574-399b-4de8-a421-8dbf33383e6e\") " pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.463150 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e6a574-399b-4de8-a421-8dbf33383e6e-catalog-content\") pod \"certified-operators-ll9j5\" (UID: \"c9e6a574-399b-4de8-a421-8dbf33383e6e\") " pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.463275 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gx5z7\" (UniqueName: \"kubernetes.io/projected/c9e6a574-399b-4de8-a421-8dbf33383e6e-kube-api-access-gx5z7\") pod \"certified-operators-ll9j5\" (UID: \"c9e6a574-399b-4de8-a421-8dbf33383e6e\") " pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.565258 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e6a574-399b-4de8-a421-8dbf33383e6e-utilities\") pod \"certified-operators-ll9j5\" (UID: \"c9e6a574-399b-4de8-a421-8dbf33383e6e\") " pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.565323 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e6a574-399b-4de8-a421-8dbf33383e6e-catalog-content\") pod \"certified-operators-ll9j5\" (UID: \"c9e6a574-399b-4de8-a421-8dbf33383e6e\") " pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.565401 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gx5z7\" (UniqueName: \"kubernetes.io/projected/c9e6a574-399b-4de8-a421-8dbf33383e6e-kube-api-access-gx5z7\") pod \"certified-operators-ll9j5\" (UID: \"c9e6a574-399b-4de8-a421-8dbf33383e6e\") " pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.566243 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e6a574-399b-4de8-a421-8dbf33383e6e-utilities\") pod \"certified-operators-ll9j5\" (UID: \"c9e6a574-399b-4de8-a421-8dbf33383e6e\") " pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.566490 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e6a574-399b-4de8-a421-8dbf33383e6e-catalog-content\") pod \"certified-operators-ll9j5\" (UID: \"c9e6a574-399b-4de8-a421-8dbf33383e6e\") " pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.589423 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gx5z7\" (UniqueName: \"kubernetes.io/projected/c9e6a574-399b-4de8-a421-8dbf33383e6e-kube-api-access-gx5z7\") pod \"certified-operators-ll9j5\" (UID: \"c9e6a574-399b-4de8-a421-8dbf33383e6e\") " pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:08:50 crc kubenswrapper[4932]: I1125 11:08:50.710724 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:08:51 crc kubenswrapper[4932]: I1125 11:08:51.386853 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ll9j5"]
Nov 25 11:08:52 crc kubenswrapper[4932]: I1125 11:08:52.290994 4932 generic.go:334] "Generic (PLEG): container finished" podID="c9e6a574-399b-4de8-a421-8dbf33383e6e" containerID="e6dc4d56d08f2bb67af09bc689072d7f568cb4917c0399361caed05a030e01ab" exitCode=0
Nov 25 11:08:52 crc kubenswrapper[4932]: I1125 11:08:52.291065 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll9j5" event={"ID":"c9e6a574-399b-4de8-a421-8dbf33383e6e","Type":"ContainerDied","Data":"e6dc4d56d08f2bb67af09bc689072d7f568cb4917c0399361caed05a030e01ab"}
Nov 25 11:08:52 crc kubenswrapper[4932]: I1125 11:08:52.291496 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll9j5" event={"ID":"c9e6a574-399b-4de8-a421-8dbf33383e6e","Type":"ContainerStarted","Data":"1f7d73de72a69d024aaeb817006239f12b4b6b569b1378966aa3651d15169b49"}
Nov 25 11:08:53 crc kubenswrapper[4932]: I1125 11:08:53.303841 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll9j5" event={"ID":"c9e6a574-399b-4de8-a421-8dbf33383e6e","Type":"ContainerStarted","Data":"1dac50c97e714cdf0548749276b5df1853a0094b0552f39753c32e3535d0d23b"}
Nov 25 11:08:55 crc kubenswrapper[4932]: I1125 11:08:55.325353 4932 generic.go:334] "Generic (PLEG): container finished" podID="c9e6a574-399b-4de8-a421-8dbf33383e6e" containerID="1dac50c97e714cdf0548749276b5df1853a0094b0552f39753c32e3535d0d23b" exitCode=0
Nov 25 11:08:55 crc kubenswrapper[4932]: I1125 11:08:55.325415 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll9j5" event={"ID":"c9e6a574-399b-4de8-a421-8dbf33383e6e","Type":"ContainerDied","Data":"1dac50c97e714cdf0548749276b5df1853a0094b0552f39753c32e3535d0d23b"}
Nov 25 11:08:56 crc kubenswrapper[4932]: I1125 11:08:56.338027 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll9j5" event={"ID":"c9e6a574-399b-4de8-a421-8dbf33383e6e","Type":"ContainerStarted","Data":"c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652"}
Nov 25 11:08:56 crc kubenswrapper[4932]: I1125 11:08:56.362487 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ll9j5" podStartSLOduration=2.714872061 podStartE2EDuration="6.362461214s" podCreationTimestamp="2025-11-25 11:08:50 +0000 UTC" firstStartedPulling="2025-11-25 11:08:52.292964352 +0000 UTC m=+8392.418993925" lastFinishedPulling="2025-11-25 11:08:55.940553515 +0000 UTC m=+8396.066583078" observedRunningTime="2025-11-25 11:08:56.357936274 +0000 UTC m=+8396.483965867" watchObservedRunningTime="2025-11-25 11:08:56.362461214 +0000 UTC m=+8396.488490777"
Nov 25 11:09:00 crc kubenswrapper[4932]: I1125 11:09:00.711505 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:09:00 crc kubenswrapper[4932]: I1125 11:09:00.713264 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:09:00 crc kubenswrapper[4932]: I1125 11:09:00.764361 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:09:01 crc kubenswrapper[4932]: I1125 11:09:01.434616 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:09:01 crc kubenswrapper[4932]: I1125 11:09:01.487547 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ll9j5"]
Nov 25 11:09:03 crc kubenswrapper[4932]: I1125 11:09:03.415485 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ll9j5" podUID="c9e6a574-399b-4de8-a421-8dbf33383e6e" containerName="registry-server" containerID="cri-o://c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652" gracePeriod=2
Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.383775 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ll9j5"
Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.427786 4932 generic.go:334] "Generic (PLEG): container finished" podID="c9e6a574-399b-4de8-a421-8dbf33383e6e" containerID="c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652" exitCode=0
Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.427837 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll9j5" event={"ID":"c9e6a574-399b-4de8-a421-8dbf33383e6e","Type":"ContainerDied","Data":"c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652"}
Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.427859 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ll9j5" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.427870 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll9j5" event={"ID":"c9e6a574-399b-4de8-a421-8dbf33383e6e","Type":"ContainerDied","Data":"1f7d73de72a69d024aaeb817006239f12b4b6b569b1378966aa3651d15169b49"} Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.428121 4932 scope.go:117] "RemoveContainer" containerID="c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.452522 4932 scope.go:117] "RemoveContainer" containerID="1dac50c97e714cdf0548749276b5df1853a0094b0552f39753c32e3535d0d23b" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.474148 4932 scope.go:117] "RemoveContainer" containerID="e6dc4d56d08f2bb67af09bc689072d7f568cb4917c0399361caed05a030e01ab" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.482589 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gx5z7\" (UniqueName: \"kubernetes.io/projected/c9e6a574-399b-4de8-a421-8dbf33383e6e-kube-api-access-gx5z7\") pod \"c9e6a574-399b-4de8-a421-8dbf33383e6e\" (UID: \"c9e6a574-399b-4de8-a421-8dbf33383e6e\") " Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.482816 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e6a574-399b-4de8-a421-8dbf33383e6e-utilities\") pod \"c9e6a574-399b-4de8-a421-8dbf33383e6e\" (UID: \"c9e6a574-399b-4de8-a421-8dbf33383e6e\") " Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.482949 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e6a574-399b-4de8-a421-8dbf33383e6e-catalog-content\") pod \"c9e6a574-399b-4de8-a421-8dbf33383e6e\" (UID: \"c9e6a574-399b-4de8-a421-8dbf33383e6e\") " Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.483861 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9e6a574-399b-4de8-a421-8dbf33383e6e-utilities" (OuterVolumeSpecName: "utilities") pod "c9e6a574-399b-4de8-a421-8dbf33383e6e" (UID: "c9e6a574-399b-4de8-a421-8dbf33383e6e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.488441 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9e6a574-399b-4de8-a421-8dbf33383e6e-kube-api-access-gx5z7" (OuterVolumeSpecName: "kube-api-access-gx5z7") pod "c9e6a574-399b-4de8-a421-8dbf33383e6e" (UID: "c9e6a574-399b-4de8-a421-8dbf33383e6e"). InnerVolumeSpecName "kube-api-access-gx5z7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.530723 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9e6a574-399b-4de8-a421-8dbf33383e6e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c9e6a574-399b-4de8-a421-8dbf33383e6e" (UID: "c9e6a574-399b-4de8-a421-8dbf33383e6e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.581322 4932 scope.go:117] "RemoveContainer" containerID="c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652" Nov 25 11:09:04 crc kubenswrapper[4932]: E1125 11:09:04.581714 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652\": container with ID starting with c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652 not found: ID does not exist" containerID="c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.581754 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652"} err="failed to get container status \"c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652\": rpc error: code = NotFound desc = could not find container \"c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652\": container with ID starting with c849d9c715cbefdd1fef72c700133f4d79be418e5e503cb2a84d0be5ac167652 not found: ID does not exist" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.581782 4932 scope.go:117] "RemoveContainer" containerID="1dac50c97e714cdf0548749276b5df1853a0094b0552f39753c32e3535d0d23b" Nov 25 11:09:04 crc kubenswrapper[4932]: E1125 11:09:04.582085 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1dac50c97e714cdf0548749276b5df1853a0094b0552f39753c32e3535d0d23b\": container with ID starting with 1dac50c97e714cdf0548749276b5df1853a0094b0552f39753c32e3535d0d23b not found: ID does not exist" containerID="1dac50c97e714cdf0548749276b5df1853a0094b0552f39753c32e3535d0d23b" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.582113 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1dac50c97e714cdf0548749276b5df1853a0094b0552f39753c32e3535d0d23b"} err="failed to get container status \"1dac50c97e714cdf0548749276b5df1853a0094b0552f39753c32e3535d0d23b\": rpc error: code = NotFound desc = could not find container \"1dac50c97e714cdf0548749276b5df1853a0094b0552f39753c32e3535d0d23b\": container with ID starting with 1dac50c97e714cdf0548749276b5df1853a0094b0552f39753c32e3535d0d23b not found: ID does not exist" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.582131 4932 scope.go:117] "RemoveContainer" containerID="e6dc4d56d08f2bb67af09bc689072d7f568cb4917c0399361caed05a030e01ab" Nov 25 11:09:04 crc kubenswrapper[4932]: E1125 11:09:04.582433 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6dc4d56d08f2bb67af09bc689072d7f568cb4917c0399361caed05a030e01ab\": container with ID starting with e6dc4d56d08f2bb67af09bc689072d7f568cb4917c0399361caed05a030e01ab not found: ID does not exist" containerID="e6dc4d56d08f2bb67af09bc689072d7f568cb4917c0399361caed05a030e01ab" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.582462 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6dc4d56d08f2bb67af09bc689072d7f568cb4917c0399361caed05a030e01ab"} err="failed to get container status \"e6dc4d56d08f2bb67af09bc689072d7f568cb4917c0399361caed05a030e01ab\": rpc error: code = NotFound desc = could not 
find container \"e6dc4d56d08f2bb67af09bc689072d7f568cb4917c0399361caed05a030e01ab\": container with ID starting with e6dc4d56d08f2bb67af09bc689072d7f568cb4917c0399361caed05a030e01ab not found: ID does not exist" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.585053 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9e6a574-399b-4de8-a421-8dbf33383e6e-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.585083 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9e6a574-399b-4de8-a421-8dbf33383e6e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.585094 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gx5z7\" (UniqueName: \"kubernetes.io/projected/c9e6a574-399b-4de8-a421-8dbf33383e6e-kube-api-access-gx5z7\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.762642 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ll9j5"] Nov 25 11:09:04 crc kubenswrapper[4932]: I1125 11:09:04.777645 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ll9j5"] Nov 25 11:09:06 crc kubenswrapper[4932]: I1125 11:09:06.619841 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9e6a574-399b-4de8-a421-8dbf33383e6e" path="/var/lib/kubelet/pods/c9e6a574-399b-4de8-a421-8dbf33383e6e/volumes" Nov 25 11:09:07 crc kubenswrapper[4932]: I1125 11:09:07.181091 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:09:07 crc kubenswrapper[4932]: I1125 11:09:07.181155 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.456010 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bbv7s"] Nov 25 11:09:16 crc kubenswrapper[4932]: E1125 11:09:16.458037 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9e6a574-399b-4de8-a421-8dbf33383e6e" containerName="registry-server" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.458059 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9e6a574-399b-4de8-a421-8dbf33383e6e" containerName="registry-server" Nov 25 11:09:16 crc kubenswrapper[4932]: E1125 11:09:16.458094 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9e6a574-399b-4de8-a421-8dbf33383e6e" containerName="extract-content" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.458102 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9e6a574-399b-4de8-a421-8dbf33383e6e" containerName="extract-content" Nov 25 11:09:16 crc kubenswrapper[4932]: E1125 11:09:16.458174 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9e6a574-399b-4de8-a421-8dbf33383e6e" containerName="extract-utilities" Nov 25 11:09:16 crc 
kubenswrapper[4932]: I1125 11:09:16.458201 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9e6a574-399b-4de8-a421-8dbf33383e6e" containerName="extract-utilities" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.459069 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9e6a574-399b-4de8-a421-8dbf33383e6e" containerName="registry-server" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.465695 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.487472 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbv7s"] Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.665977 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf309b36-e198-46df-b012-0563d770b8b2-catalog-content\") pod \"redhat-marketplace-bbv7s\" (UID: \"cf309b36-e198-46df-b012-0563d770b8b2\") " pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.666075 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf309b36-e198-46df-b012-0563d770b8b2-utilities\") pod \"redhat-marketplace-bbv7s\" (UID: \"cf309b36-e198-46df-b012-0563d770b8b2\") " pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.666257 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l69kz\" (UniqueName: \"kubernetes.io/projected/cf309b36-e198-46df-b012-0563d770b8b2-kube-api-access-l69kz\") pod \"redhat-marketplace-bbv7s\" (UID: \"cf309b36-e198-46df-b012-0563d770b8b2\") " pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.768465 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf309b36-e198-46df-b012-0563d770b8b2-catalog-content\") pod \"redhat-marketplace-bbv7s\" (UID: \"cf309b36-e198-46df-b012-0563d770b8b2\") " pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.768590 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf309b36-e198-46df-b012-0563d770b8b2-utilities\") pod \"redhat-marketplace-bbv7s\" (UID: \"cf309b36-e198-46df-b012-0563d770b8b2\") " pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.768626 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l69kz\" (UniqueName: \"kubernetes.io/projected/cf309b36-e198-46df-b012-0563d770b8b2-kube-api-access-l69kz\") pod \"redhat-marketplace-bbv7s\" (UID: \"cf309b36-e198-46df-b012-0563d770b8b2\") " pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.769259 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf309b36-e198-46df-b012-0563d770b8b2-catalog-content\") pod \"redhat-marketplace-bbv7s\" (UID: \"cf309b36-e198-46df-b012-0563d770b8b2\") " pod="openshift-marketplace/redhat-marketplace-bbv7s" 
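
The reconciler entries above trace the kubelet volume manager's sequence for a marketplace catalog pod: VerifyControllerAttachedVolume, then MountVolume.SetUp, for two emptyDir volumes ("utilities" and "catalog-content") plus the API-server-injected projected service-account token volume ("kube-api-access-*"). A minimal sketch of the equivalent volume declarations, using k8s.io/api/core/v1 types — the volume names follow the log, everything else is an illustrative assumption, not the cluster's actual manifest:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    // catalogPodVolumes mirrors the three volumes the reconciler handles above.
    func catalogPodVolumes() []corev1.Volume {
        return []corev1.Volume{
            // Scratch space used by the extract-utilities init container seen in the log.
            {Name: "utilities", VolumeSource: corev1.VolumeSource{
                EmptyDir: &corev1.EmptyDirVolumeSource{}}},
            // Receives the catalog image content written by extract-content before
            // registry-server serves it.
            {Name: "catalog-content", VolumeSource: corev1.VolumeSource{
                EmptyDir: &corev1.EmptyDirVolumeSource{}}},
            // kube-api-access-* is normally injected by the API server as a projected
            // volume (token + CA bundle + namespace); left empty here as a placeholder.
            {Name: "kube-api-access-l69kz", VolumeSource: corev1.VolumeSource{
                Projected: &corev1.ProjectedVolumeSource{}}},
        }
    }

    func main() {
        for _, v := range catalogPodVolumes() {
            fmt.Println(v.Name)
        }
    }

Because all three are emptyDir or projected (node-local) volumes, the "Volume detached ... DevicePath \"\"" teardown entries later in the log report an empty device path: there is no attachable device to detach, only directories to unmount and clean up.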
Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.769486 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf309b36-e198-46df-b012-0563d770b8b2-utilities\") pod \"redhat-marketplace-bbv7s\" (UID: \"cf309b36-e198-46df-b012-0563d770b8b2\") " pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.797750 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l69kz\" (UniqueName: \"kubernetes.io/projected/cf309b36-e198-46df-b012-0563d770b8b2-kube-api-access-l69kz\") pod \"redhat-marketplace-bbv7s\" (UID: \"cf309b36-e198-46df-b012-0563d770b8b2\") " pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:16 crc kubenswrapper[4932]: I1125 11:09:16.803634 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:17 crc kubenswrapper[4932]: I1125 11:09:17.448693 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbv7s"] Nov 25 11:09:17 crc kubenswrapper[4932]: I1125 11:09:17.564439 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbv7s" event={"ID":"cf309b36-e198-46df-b012-0563d770b8b2","Type":"ContainerStarted","Data":"d1246473b43b7d820c229c49bef0c562b071cb4b23c0abb09312dbf3974331b0"} Nov 25 11:09:18 crc kubenswrapper[4932]: I1125 11:09:18.584425 4932 generic.go:334] "Generic (PLEG): container finished" podID="cf309b36-e198-46df-b012-0563d770b8b2" containerID="95502636f7836f3d76e32880cf1c32280e7154107b4307738fb299d38ce084aa" exitCode=0 Nov 25 11:09:18 crc kubenswrapper[4932]: I1125 11:09:18.584491 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbv7s" event={"ID":"cf309b36-e198-46df-b012-0563d770b8b2","Type":"ContainerDied","Data":"95502636f7836f3d76e32880cf1c32280e7154107b4307738fb299d38ce084aa"} Nov 25 11:09:20 crc kubenswrapper[4932]: I1125 11:09:20.611124 4932 generic.go:334] "Generic (PLEG): container finished" podID="cf309b36-e198-46df-b012-0563d770b8b2" containerID="d654b7513718e953cda7b2da6eeb03f3f01ab49b350d35bb4087849783406863" exitCode=0 Nov 25 11:09:20 crc kubenswrapper[4932]: I1125 11:09:20.623163 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbv7s" event={"ID":"cf309b36-e198-46df-b012-0563d770b8b2","Type":"ContainerDied","Data":"d654b7513718e953cda7b2da6eeb03f3f01ab49b350d35bb4087849783406863"} Nov 25 11:09:21 crc kubenswrapper[4932]: I1125 11:09:21.624767 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbv7s" event={"ID":"cf309b36-e198-46df-b012-0563d770b8b2","Type":"ContainerStarted","Data":"fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755"} Nov 25 11:09:21 crc kubenswrapper[4932]: I1125 11:09:21.643842 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bbv7s" podStartSLOduration=3.168569884 podStartE2EDuration="5.643826103s" podCreationTimestamp="2025-11-25 11:09:16 +0000 UTC" firstStartedPulling="2025-11-25 11:09:18.587498099 +0000 UTC m=+8418.713527672" lastFinishedPulling="2025-11-25 11:09:21.062754328 +0000 UTC m=+8421.188783891" observedRunningTime="2025-11-25 11:09:21.640473256 +0000 UTC m=+8421.766502829" watchObservedRunningTime="2025-11-25 11:09:21.643826103 +0000 UTC 
m=+8421.769855656" Nov 25 11:09:26 crc kubenswrapper[4932]: I1125 11:09:26.804478 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:26 crc kubenswrapper[4932]: I1125 11:09:26.805135 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:26 crc kubenswrapper[4932]: I1125 11:09:26.857608 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:27 crc kubenswrapper[4932]: I1125 11:09:27.761472 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:27 crc kubenswrapper[4932]: I1125 11:09:27.811614 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbv7s"] Nov 25 11:09:29 crc kubenswrapper[4932]: I1125 11:09:29.725825 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bbv7s" podUID="cf309b36-e198-46df-b012-0563d770b8b2" containerName="registry-server" containerID="cri-o://fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755" gracePeriod=2 Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.177236 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.285701 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf309b36-e198-46df-b012-0563d770b8b2-catalog-content\") pod \"cf309b36-e198-46df-b012-0563d770b8b2\" (UID: \"cf309b36-e198-46df-b012-0563d770b8b2\") " Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.285902 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l69kz\" (UniqueName: \"kubernetes.io/projected/cf309b36-e198-46df-b012-0563d770b8b2-kube-api-access-l69kz\") pod \"cf309b36-e198-46df-b012-0563d770b8b2\" (UID: \"cf309b36-e198-46df-b012-0563d770b8b2\") " Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.285935 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf309b36-e198-46df-b012-0563d770b8b2-utilities\") pod \"cf309b36-e198-46df-b012-0563d770b8b2\" (UID: \"cf309b36-e198-46df-b012-0563d770b8b2\") " Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.287176 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf309b36-e198-46df-b012-0563d770b8b2-utilities" (OuterVolumeSpecName: "utilities") pod "cf309b36-e198-46df-b012-0563d770b8b2" (UID: "cf309b36-e198-46df-b012-0563d770b8b2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.293246 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf309b36-e198-46df-b012-0563d770b8b2-kube-api-access-l69kz" (OuterVolumeSpecName: "kube-api-access-l69kz") pod "cf309b36-e198-46df-b012-0563d770b8b2" (UID: "cf309b36-e198-46df-b012-0563d770b8b2"). InnerVolumeSpecName "kube-api-access-l69kz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.305603 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf309b36-e198-46df-b012-0563d770b8b2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cf309b36-e198-46df-b012-0563d770b8b2" (UID: "cf309b36-e198-46df-b012-0563d770b8b2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.388532 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l69kz\" (UniqueName: \"kubernetes.io/projected/cf309b36-e198-46df-b012-0563d770b8b2-kube-api-access-l69kz\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.388577 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf309b36-e198-46df-b012-0563d770b8b2-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.388590 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf309b36-e198-46df-b012-0563d770b8b2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.738220 4932 generic.go:334] "Generic (PLEG): container finished" podID="cf309b36-e198-46df-b012-0563d770b8b2" containerID="fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755" exitCode=0 Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.738277 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bbv7s" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.738274 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbv7s" event={"ID":"cf309b36-e198-46df-b012-0563d770b8b2","Type":"ContainerDied","Data":"fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755"} Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.738422 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bbv7s" event={"ID":"cf309b36-e198-46df-b012-0563d770b8b2","Type":"ContainerDied","Data":"d1246473b43b7d820c229c49bef0c562b071cb4b23c0abb09312dbf3974331b0"} Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.738445 4932 scope.go:117] "RemoveContainer" containerID="fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.765238 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbv7s"] Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.773481 4932 scope.go:117] "RemoveContainer" containerID="d654b7513718e953cda7b2da6eeb03f3f01ab49b350d35bb4087849783406863" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.779752 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bbv7s"] Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.794829 4932 scope.go:117] "RemoveContainer" containerID="95502636f7836f3d76e32880cf1c32280e7154107b4307738fb299d38ce084aa" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.850364 4932 scope.go:117] "RemoveContainer" containerID="fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755" Nov 25 11:09:30 crc kubenswrapper[4932]: E1125 11:09:30.850849 4932 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755\": container with ID starting with fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755 not found: ID does not exist" containerID="fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.850890 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755"} err="failed to get container status \"fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755\": rpc error: code = NotFound desc = could not find container \"fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755\": container with ID starting with fa705d6957c2f703441bd9ad3ecd359ce2ce4109201a128003a4ed74f15a0755 not found: ID does not exist" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.850918 4932 scope.go:117] "RemoveContainer" containerID="d654b7513718e953cda7b2da6eeb03f3f01ab49b350d35bb4087849783406863" Nov 25 11:09:30 crc kubenswrapper[4932]: E1125 11:09:30.851203 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d654b7513718e953cda7b2da6eeb03f3f01ab49b350d35bb4087849783406863\": container with ID starting with d654b7513718e953cda7b2da6eeb03f3f01ab49b350d35bb4087849783406863 not found: ID does not exist" containerID="d654b7513718e953cda7b2da6eeb03f3f01ab49b350d35bb4087849783406863" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.851235 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d654b7513718e953cda7b2da6eeb03f3f01ab49b350d35bb4087849783406863"} err="failed to get container status \"d654b7513718e953cda7b2da6eeb03f3f01ab49b350d35bb4087849783406863\": rpc error: code = NotFound desc = could not find container \"d654b7513718e953cda7b2da6eeb03f3f01ab49b350d35bb4087849783406863\": container with ID starting with d654b7513718e953cda7b2da6eeb03f3f01ab49b350d35bb4087849783406863 not found: ID does not exist" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.851253 4932 scope.go:117] "RemoveContainer" containerID="95502636f7836f3d76e32880cf1c32280e7154107b4307738fb299d38ce084aa" Nov 25 11:09:30 crc kubenswrapper[4932]: E1125 11:09:30.851665 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95502636f7836f3d76e32880cf1c32280e7154107b4307738fb299d38ce084aa\": container with ID starting with 95502636f7836f3d76e32880cf1c32280e7154107b4307738fb299d38ce084aa not found: ID does not exist" containerID="95502636f7836f3d76e32880cf1c32280e7154107b4307738fb299d38ce084aa" Nov 25 11:09:30 crc kubenswrapper[4932]: I1125 11:09:30.851690 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95502636f7836f3d76e32880cf1c32280e7154107b4307738fb299d38ce084aa"} err="failed to get container status \"95502636f7836f3d76e32880cf1c32280e7154107b4307738fb299d38ce084aa\": rpc error: code = NotFound desc = could not find container \"95502636f7836f3d76e32880cf1c32280e7154107b4307738fb299d38ce084aa\": container with ID starting with 95502636f7836f3d76e32880cf1c32280e7154107b4307738fb299d38ce084aa not found: ID does not exist" Nov 25 11:09:32 crc kubenswrapper[4932]: I1125 11:09:32.617287 4932 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="cf309b36-e198-46df-b012-0563d770b8b2" path="/var/lib/kubelet/pods/cf309b36-e198-46df-b012-0563d770b8b2/volumes" Nov 25 11:09:37 crc kubenswrapper[4932]: I1125 11:09:37.180750 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:09:37 crc kubenswrapper[4932]: I1125 11:09:37.181346 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 11:09:37 crc kubenswrapper[4932]: I1125 11:09:37.181393 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" Nov 25 11:09:37 crc kubenswrapper[4932]: I1125 11:09:37.182233 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"} pod="openshift-machine-config-operator/machine-config-daemon-plbqh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 11:09:37 crc kubenswrapper[4932]: I1125 11:09:37.182302 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" containerID="cri-o://eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" gracePeriod=600 Nov 25 11:09:37 crc kubenswrapper[4932]: E1125 11:09:37.302811 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:09:37 crc kubenswrapper[4932]: I1125 11:09:37.836248 4932 generic.go:334] "Generic (PLEG): container finished" podID="fc52f208-3635-4b33-a1f2-720bcff56064" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" exitCode=0 Nov 25 11:09:37 crc kubenswrapper[4932]: I1125 11:09:37.836487 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerDied","Data":"eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"} Nov 25 11:09:37 crc kubenswrapper[4932]: I1125 11:09:37.836633 4932 scope.go:117] "RemoveContainer" containerID="5d323c78ea3c7096bec61f602d4c0a496b960f52b18bce2aa7e7e84b920a39a7" Nov 25 11:09:37 crc kubenswrapper[4932]: I1125 11:09:37.844433 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:09:37 crc kubenswrapper[4932]: E1125 11:09:37.845708 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:09:51 crc kubenswrapper[4932]: I1125 11:09:51.606402 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:09:51 crc kubenswrapper[4932]: E1125 11:09:51.607423 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:10:06 crc kubenswrapper[4932]: I1125 11:10:06.605852 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:10:06 crc kubenswrapper[4932]: E1125 11:10:06.606677 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:10:08 crc kubenswrapper[4932]: I1125 11:10:08.137746 4932 generic.go:334] "Generic (PLEG): container finished" podID="8bedcf24-a05b-4934-b6ae-0042dc38673a" containerID="cadb3867161bc571a5d136845ea932fb2d3b6be53d9606251647d0b56c63d8b3" exitCode=0 Nov 25 11:10:08 crc kubenswrapper[4932]: I1125 11:10:08.137803 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl" event={"ID":"8bedcf24-a05b-4934-b6ae-0042dc38673a","Type":"ContainerDied","Data":"cadb3867161bc571a5d136845ea932fb2d3b6be53d9606251647d0b56c63d8b3"} Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.605045 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.737857 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-migration-ssh-key-1\") pod \"8bedcf24-a05b-4934-b6ae-0042dc38673a\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.738230 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-combined-ca-bundle\") pod \"8bedcf24-a05b-4934-b6ae-0042dc38673a\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.738262 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-compute-config-0\") pod \"8bedcf24-a05b-4934-b6ae-0042dc38673a\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.738310 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqtpw\" (UniqueName: \"kubernetes.io/projected/8bedcf24-a05b-4934-b6ae-0042dc38673a-kube-api-access-tqtpw\") pod \"8bedcf24-a05b-4934-b6ae-0042dc38673a\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.738407 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cells-global-config-0\") pod \"8bedcf24-a05b-4934-b6ae-0042dc38673a\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.738487 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-inventory\") pod \"8bedcf24-a05b-4934-b6ae-0042dc38673a\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.738578 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-migration-ssh-key-0\") pod \"8bedcf24-a05b-4934-b6ae-0042dc38673a\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.738733 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-ssh-key\") pod \"8bedcf24-a05b-4934-b6ae-0042dc38673a\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.738783 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-compute-config-1\") pod \"8bedcf24-a05b-4934-b6ae-0042dc38673a\" (UID: \"8bedcf24-a05b-4934-b6ae-0042dc38673a\") " Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.758172 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "8bedcf24-a05b-4934-b6ae-0042dc38673a" (UID: "8bedcf24-a05b-4934-b6ae-0042dc38673a"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.758182 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bedcf24-a05b-4934-b6ae-0042dc38673a-kube-api-access-tqtpw" (OuterVolumeSpecName: "kube-api-access-tqtpw") pod "8bedcf24-a05b-4934-b6ae-0042dc38673a" (UID: "8bedcf24-a05b-4934-b6ae-0042dc38673a"). InnerVolumeSpecName "kube-api-access-tqtpw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.775460 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "8bedcf24-a05b-4934-b6ae-0042dc38673a" (UID: "8bedcf24-a05b-4934-b6ae-0042dc38673a"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.775537 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-inventory" (OuterVolumeSpecName: "inventory") pod "8bedcf24-a05b-4934-b6ae-0042dc38673a" (UID: "8bedcf24-a05b-4934-b6ae-0042dc38673a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.778730 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8bedcf24-a05b-4934-b6ae-0042dc38673a" (UID: "8bedcf24-a05b-4934-b6ae-0042dc38673a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.779325 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "8bedcf24-a05b-4934-b6ae-0042dc38673a" (UID: "8bedcf24-a05b-4934-b6ae-0042dc38673a"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.779592 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "8bedcf24-a05b-4934-b6ae-0042dc38673a" (UID: "8bedcf24-a05b-4934-b6ae-0042dc38673a"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.781186 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "8bedcf24-a05b-4934-b6ae-0042dc38673a" (UID: "8bedcf24-a05b-4934-b6ae-0042dc38673a"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.787020 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "8bedcf24-a05b-4934-b6ae-0042dc38673a" (UID: "8bedcf24-a05b-4934-b6ae-0042dc38673a"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.842010 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqtpw\" (UniqueName: \"kubernetes.io/projected/8bedcf24-a05b-4934-b6ae-0042dc38673a-kube-api-access-tqtpw\") on node \"crc\" DevicePath \"\"" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.842054 4932 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.842067 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.842078 4932 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.842089 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.842100 4932 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.842114 4932 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.842149 4932 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:10:09 crc kubenswrapper[4932]: I1125 11:10:09.842161 4932 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/8bedcf24-a05b-4934-b6ae-0042dc38673a-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.158888 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl" event={"ID":"8bedcf24-a05b-4934-b6ae-0042dc38673a","Type":"ContainerDied","Data":"ba17102bc62e4c9ee8d3734c58637953a3f238822a6852e5a7fc85c928ee9da0"} Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.158939 4932 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="ba17102bc62e4c9ee8d3734c58637953a3f238822a6852e5a7fc85c928ee9da0" Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.158984 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-jqjhl" Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.248114 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-6wtk6"] Nov 25 11:10:10 crc kubenswrapper[4932]: E1125 11:10:10.248608 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bedcf24-a05b-4934-b6ae-0042dc38673a" containerName="nova-cell1-openstack-openstack-cell1" Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.248629 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bedcf24-a05b-4934-b6ae-0042dc38673a" containerName="nova-cell1-openstack-openstack-cell1" Nov 25 11:10:10 crc kubenswrapper[4932]: E1125 11:10:10.248658 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf309b36-e198-46df-b012-0563d770b8b2" containerName="extract-utilities" Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.248665 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf309b36-e198-46df-b012-0563d770b8b2" containerName="extract-utilities" Nov 25 11:10:10 crc kubenswrapper[4932]: E1125 11:10:10.248686 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf309b36-e198-46df-b012-0563d770b8b2" containerName="extract-content" Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.248693 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf309b36-e198-46df-b012-0563d770b8b2" containerName="extract-content" Nov 25 11:10:10 crc kubenswrapper[4932]: E1125 11:10:10.248707 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf309b36-e198-46df-b012-0563d770b8b2" containerName="registry-server" Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.248712 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf309b36-e198-46df-b012-0563d770b8b2" containerName="registry-server" Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.248890 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf309b36-e198-46df-b012-0563d770b8b2" containerName="registry-server" Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.248907 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bedcf24-a05b-4934-b6ae-0042dc38673a" containerName="nova-cell1-openstack-openstack-cell1" Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.249718 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.252980 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.255991 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.256275 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.256430 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.256484 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.265215 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-6wtk6"]
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.352346 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.352411 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.352475 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ssh-key\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.352500 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-inventory\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.352523 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqwl8\" (UniqueName: \"kubernetes.io/projected/4530a78c-b3ce-425c-bad3-c8821d4de544-kube-api-access-hqwl8\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.352614 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.352677 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.454782 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.454911 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.454990 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.455026 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.455110 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ssh-key\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.455146 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-inventory\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.455176 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqwl8\" (UniqueName: \"kubernetes.io/projected/4530a78c-b3ce-425c-bad3-c8821d4de544-kube-api-access-hqwl8\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.459939 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.460038 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-inventory\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.460216 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.461194 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ssh-key\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.462035 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.462429 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.475840 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqwl8\" (UniqueName: \"kubernetes.io/projected/4530a78c-b3ce-425c-bad3-c8821d4de544-kube-api-access-hqwl8\") pod \"telemetry-openstack-openstack-cell1-6wtk6\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:10 crc kubenswrapper[4932]: I1125 11:10:10.569212 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Nov 25 11:10:11 crc kubenswrapper[4932]: I1125 11:10:11.129685 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-6wtk6"]
Nov 25 11:10:11 crc kubenswrapper[4932]: I1125 11:10:11.169568 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-6wtk6" event={"ID":"4530a78c-b3ce-425c-bad3-c8821d4de544","Type":"ContainerStarted","Data":"a4ab40c8c34a40449625270e61f0e8bd0f05265c26a1e563db710c9187e54aac"}
Nov 25 11:10:12 crc kubenswrapper[4932]: I1125 11:10:12.180554 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-6wtk6" event={"ID":"4530a78c-b3ce-425c-bad3-c8821d4de544","Type":"ContainerStarted","Data":"9e66d5eb73ffc7acf7cc0559fa6c3791d7a9071f7b17d00b8a6c0b13a5e9aa38"}
Nov 25 11:10:19 crc kubenswrapper[4932]: I1125 11:10:19.606161 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"
Nov 25 11:10:19 crc kubenswrapper[4932]: E1125 11:10:19.607008 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:10:34 crc kubenswrapper[4932]: I1125 11:10:34.606955 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"
Nov 25 11:10:34 crc kubenswrapper[4932]: E1125 11:10:34.607812 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:10:49 crc kubenswrapper[4932]: I1125 11:10:49.605925 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"
Nov 25 11:10:49 crc kubenswrapper[4932]: E1125 11:10:49.607177 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:11:02 crc kubenswrapper[4932]: I1125 11:11:02.607784 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"
Nov 25 11:11:02 crc kubenswrapper[4932]: E1125 11:11:02.608938 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:11:04 crc kubenswrapper[4932]: I1125 11:11:04.757804 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-openstack-openstack-cell1-6wtk6" podStartSLOduration=54.256707628 podStartE2EDuration="54.757759823s" podCreationTimestamp="2025-11-25 11:10:10 +0000 UTC" firstStartedPulling="2025-11-25 11:10:11.128019286 +0000 UTC m=+8471.254048849" lastFinishedPulling="2025-11-25 11:10:11.629071491 +0000 UTC m=+8471.755101044" observedRunningTime="2025-11-25 11:10:12.198080739 +0000 UTC m=+8472.324110322" watchObservedRunningTime="2025-11-25 11:11:04.757759823 +0000 UTC m=+8524.883789396"
Nov 25 11:11:04 crc kubenswrapper[4932]: I1125 11:11:04.762680 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6hvvq"]
Nov 25 11:11:04 crc kubenswrapper[4932]: I1125 11:11:04.765969 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:04 crc kubenswrapper[4932]: I1125 11:11:04.785389 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6hvvq"]
Nov 25 11:11:04 crc kubenswrapper[4932]: I1125 11:11:04.939401 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4521fea-13fc-4cef-a878-64291b273e7b-catalog-content\") pod \"redhat-operators-6hvvq\" (UID: \"b4521fea-13fc-4cef-a878-64291b273e7b\") " pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:04 crc kubenswrapper[4932]: I1125 11:11:04.939501 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c96kh\" (UniqueName: \"kubernetes.io/projected/b4521fea-13fc-4cef-a878-64291b273e7b-kube-api-access-c96kh\") pod \"redhat-operators-6hvvq\" (UID: \"b4521fea-13fc-4cef-a878-64291b273e7b\") " pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:04 crc kubenswrapper[4932]: I1125 11:11:04.939598 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4521fea-13fc-4cef-a878-64291b273e7b-utilities\") pod \"redhat-operators-6hvvq\" (UID: \"b4521fea-13fc-4cef-a878-64291b273e7b\") " pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:05 crc kubenswrapper[4932]: I1125 11:11:05.042304 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4521fea-13fc-4cef-a878-64291b273e7b-catalog-content\") pod \"redhat-operators-6hvvq\" (UID: \"b4521fea-13fc-4cef-a878-64291b273e7b\") " pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:05 crc kubenswrapper[4932]: I1125 11:11:05.042369 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c96kh\" (UniqueName: \"kubernetes.io/projected/b4521fea-13fc-4cef-a878-64291b273e7b-kube-api-access-c96kh\") pod \"redhat-operators-6hvvq\" (UID: \"b4521fea-13fc-4cef-a878-64291b273e7b\") " pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:05 crc kubenswrapper[4932]: I1125 11:11:05.042401 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4521fea-13fc-4cef-a878-64291b273e7b-utilities\") pod \"redhat-operators-6hvvq\" (UID: \"b4521fea-13fc-4cef-a878-64291b273e7b\") " pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:05 crc kubenswrapper[4932]: I1125 11:11:05.043163 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4521fea-13fc-4cef-a878-64291b273e7b-utilities\") pod \"redhat-operators-6hvvq\" (UID: \"b4521fea-13fc-4cef-a878-64291b273e7b\") " pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:05 crc kubenswrapper[4932]: I1125 11:11:05.043391 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4521fea-13fc-4cef-a878-64291b273e7b-catalog-content\") pod \"redhat-operators-6hvvq\" (UID: \"b4521fea-13fc-4cef-a878-64291b273e7b\") " pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:05 crc kubenswrapper[4932]: I1125 11:11:05.063787 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c96kh\" (UniqueName: \"kubernetes.io/projected/b4521fea-13fc-4cef-a878-64291b273e7b-kube-api-access-c96kh\") pod \"redhat-operators-6hvvq\" (UID: \"b4521fea-13fc-4cef-a878-64291b273e7b\") " pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:05 crc kubenswrapper[4932]: I1125 11:11:05.094790 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:05 crc kubenswrapper[4932]: I1125 11:11:05.592617 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6hvvq"]
Nov 25 11:11:05 crc kubenswrapper[4932]: I1125 11:11:05.806548 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6hvvq" event={"ID":"b4521fea-13fc-4cef-a878-64291b273e7b","Type":"ContainerStarted","Data":"c2a8c742b4103bb63b6db1598e62405655bc6090863f8ec78d7fe604c49ecaee"}
Nov 25 11:11:06 crc kubenswrapper[4932]: I1125 11:11:06.818506 4932 generic.go:334] "Generic (PLEG): container finished" podID="b4521fea-13fc-4cef-a878-64291b273e7b" containerID="79a2503fa034e8e8e1ff28af186e0d965aa987e8ce65276caececb649758e00a" exitCode=0
Nov 25 11:11:06 crc kubenswrapper[4932]: I1125 11:11:06.818564 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6hvvq" event={"ID":"b4521fea-13fc-4cef-a878-64291b273e7b","Type":"ContainerDied","Data":"79a2503fa034e8e8e1ff28af186e0d965aa987e8ce65276caececb649758e00a"}
Nov 25 11:11:08 crc kubenswrapper[4932]: I1125 11:11:08.840213 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6hvvq" event={"ID":"b4521fea-13fc-4cef-a878-64291b273e7b","Type":"ContainerStarted","Data":"9b279f9dff25c32cfedbfab32d3aced69c4e453aab676fcb47d13224a215f07b"}
Nov 25 11:11:14 crc kubenswrapper[4932]: I1125 11:11:14.607606 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"
Nov 25 11:11:14 crc kubenswrapper[4932]: E1125 11:11:14.608842 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:11:14 crc kubenswrapper[4932]: I1125 11:11:14.898376 4932 generic.go:334] "Generic (PLEG): container finished" podID="b4521fea-13fc-4cef-a878-64291b273e7b" containerID="9b279f9dff25c32cfedbfab32d3aced69c4e453aab676fcb47d13224a215f07b" exitCode=0
Nov 25 11:11:14 crc kubenswrapper[4932]: I1125 11:11:14.898423 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6hvvq" event={"ID":"b4521fea-13fc-4cef-a878-64291b273e7b","Type":"ContainerDied","Data":"9b279f9dff25c32cfedbfab32d3aced69c4e453aab676fcb47d13224a215f07b"}
Nov 25 11:11:15 crc kubenswrapper[4932]: I1125 11:11:15.911640 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6hvvq" event={"ID":"b4521fea-13fc-4cef-a878-64291b273e7b","Type":"ContainerStarted","Data":"a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9"}
Nov 25 11:11:15 crc kubenswrapper[4932]: I1125 11:11:15.944141 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6hvvq" podStartSLOduration=3.429387499 podStartE2EDuration="11.944114963s" podCreationTimestamp="2025-11-25 11:11:04 +0000 UTC" firstStartedPulling="2025-11-25 11:11:06.821151553 +0000 UTC m=+8526.947181116" lastFinishedPulling="2025-11-25 11:11:15.335879017 +0000 UTC m=+8535.461908580" observedRunningTime="2025-11-25 11:11:15.935622169 +0000 UTC m=+8536.061651732" watchObservedRunningTime="2025-11-25 11:11:15.944114963 +0000 UTC m=+8536.070144526"
Nov 25 11:11:25 crc kubenswrapper[4932]: I1125 11:11:25.095734 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:25 crc kubenswrapper[4932]: I1125 11:11:25.096288 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:26 crc kubenswrapper[4932]: I1125 11:11:26.148550 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6hvvq" podUID="b4521fea-13fc-4cef-a878-64291b273e7b" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:11:26 crc kubenswrapper[4932]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:11:26 crc kubenswrapper[4932]: >
Nov 25 11:11:29 crc kubenswrapper[4932]: I1125 11:11:29.606101 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"
Nov 25 11:11:29 crc kubenswrapper[4932]: E1125 11:11:29.607016 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:11:36 crc kubenswrapper[4932]: I1125 11:11:36.208893 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6hvvq" podUID="b4521fea-13fc-4cef-a878-64291b273e7b" containerName="registry-server" probeResult="failure" output=<
Nov 25 11:11:36 crc kubenswrapper[4932]: timeout: failed to connect service ":50051" within 1s
Nov 25 11:11:36 crc kubenswrapper[4932]: >
Nov 25 11:11:44 crc kubenswrapper[4932]: I1125 11:11:44.606697 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"
Nov 25 11:11:44 crc kubenswrapper[4932]: E1125 11:11:44.609361 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:11:45 crc kubenswrapper[4932]: I1125 11:11:45.144250 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:45 crc kubenswrapper[4932]: I1125 11:11:45.192884 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:45 crc kubenswrapper[4932]: I1125 11:11:45.377780 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6hvvq"]
Nov 25 11:11:46 crc kubenswrapper[4932]: I1125 11:11:46.211736 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6hvvq" podUID="b4521fea-13fc-4cef-a878-64291b273e7b" containerName="registry-server" containerID="cri-o://a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9" gracePeriod=2
Nov 25 11:11:46 crc kubenswrapper[4932]: I1125 11:11:46.896497 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:46 crc kubenswrapper[4932]: I1125 11:11:46.969964 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4521fea-13fc-4cef-a878-64291b273e7b-catalog-content\") pod \"b4521fea-13fc-4cef-a878-64291b273e7b\" (UID: \"b4521fea-13fc-4cef-a878-64291b273e7b\") "
Nov 25 11:11:46 crc kubenswrapper[4932]: I1125 11:11:46.970011 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4521fea-13fc-4cef-a878-64291b273e7b-utilities\") pod \"b4521fea-13fc-4cef-a878-64291b273e7b\" (UID: \"b4521fea-13fc-4cef-a878-64291b273e7b\") "
Nov 25 11:11:46 crc kubenswrapper[4932]: I1125 11:11:46.970108 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c96kh\" (UniqueName: \"kubernetes.io/projected/b4521fea-13fc-4cef-a878-64291b273e7b-kube-api-access-c96kh\") pod \"b4521fea-13fc-4cef-a878-64291b273e7b\" (UID: \"b4521fea-13fc-4cef-a878-64291b273e7b\") "
Nov 25 11:11:46 crc kubenswrapper[4932]: I1125 11:11:46.971334 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4521fea-13fc-4cef-a878-64291b273e7b-utilities" (OuterVolumeSpecName: "utilities") pod "b4521fea-13fc-4cef-a878-64291b273e7b" (UID: "b4521fea-13fc-4cef-a878-64291b273e7b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:11:46 crc kubenswrapper[4932]: I1125 11:11:46.976258 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4521fea-13fc-4cef-a878-64291b273e7b-kube-api-access-c96kh" (OuterVolumeSpecName: "kube-api-access-c96kh") pod "b4521fea-13fc-4cef-a878-64291b273e7b" (UID: "b4521fea-13fc-4cef-a878-64291b273e7b"). InnerVolumeSpecName "kube-api-access-c96kh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.050309 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4521fea-13fc-4cef-a878-64291b273e7b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b4521fea-13fc-4cef-a878-64291b273e7b" (UID: "b4521fea-13fc-4cef-a878-64291b273e7b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.073018 4932 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b4521fea-13fc-4cef-a878-64291b273e7b-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.073060 4932 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b4521fea-13fc-4cef-a878-64291b273e7b-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.073070 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c96kh\" (UniqueName: \"kubernetes.io/projected/b4521fea-13fc-4cef-a878-64291b273e7b-kube-api-access-c96kh\") on node \"crc\" DevicePath \"\""
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.224797 4932 generic.go:334] "Generic (PLEG): container finished" podID="b4521fea-13fc-4cef-a878-64291b273e7b" containerID="a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9" exitCode=0
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.224855 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6hvvq" event={"ID":"b4521fea-13fc-4cef-a878-64291b273e7b","Type":"ContainerDied","Data":"a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9"}
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.224881 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6hvvq"
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.224896 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6hvvq" event={"ID":"b4521fea-13fc-4cef-a878-64291b273e7b","Type":"ContainerDied","Data":"c2a8c742b4103bb63b6db1598e62405655bc6090863f8ec78d7fe604c49ecaee"}
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.224921 4932 scope.go:117] "RemoveContainer" containerID="a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9"
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.260012 4932 scope.go:117] "RemoveContainer" containerID="9b279f9dff25c32cfedbfab32d3aced69c4e453aab676fcb47d13224a215f07b"
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.269629 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6hvvq"]
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.278396 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6hvvq"]
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.283503 4932 scope.go:117] "RemoveContainer" containerID="79a2503fa034e8e8e1ff28af186e0d965aa987e8ce65276caececb649758e00a"
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.327837 4932 scope.go:117] "RemoveContainer" containerID="a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9"
Nov 25 11:11:47 crc kubenswrapper[4932]: E1125 11:11:47.328971 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9\": container with ID starting with a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9 not found: ID does not exist" containerID="a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9"
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.329021 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9"} err="failed to get container status \"a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9\": rpc error: code = NotFound desc = could not find container \"a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9\": container with ID starting with a8021169606be0e887d2812de6f6addbc2342e6bed25ba4c8726c3e8443b82a9 not found: ID does not exist"
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.329057 4932 scope.go:117] "RemoveContainer" containerID="9b279f9dff25c32cfedbfab32d3aced69c4e453aab676fcb47d13224a215f07b"
Nov 25 11:11:47 crc kubenswrapper[4932]: E1125 11:11:47.329454 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b279f9dff25c32cfedbfab32d3aced69c4e453aab676fcb47d13224a215f07b\": container with ID starting with 9b279f9dff25c32cfedbfab32d3aced69c4e453aab676fcb47d13224a215f07b not found: ID does not exist" containerID="9b279f9dff25c32cfedbfab32d3aced69c4e453aab676fcb47d13224a215f07b"
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.329486 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b279f9dff25c32cfedbfab32d3aced69c4e453aab676fcb47d13224a215f07b"} err="failed to get container status \"9b279f9dff25c32cfedbfab32d3aced69c4e453aab676fcb47d13224a215f07b\": rpc error: code = NotFound desc = could not find container \"9b279f9dff25c32cfedbfab32d3aced69c4e453aab676fcb47d13224a215f07b\": container with ID starting with 9b279f9dff25c32cfedbfab32d3aced69c4e453aab676fcb47d13224a215f07b not found: ID does not exist"
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.329507 4932 scope.go:117] "RemoveContainer" containerID="79a2503fa034e8e8e1ff28af186e0d965aa987e8ce65276caececb649758e00a"
Nov 25 11:11:47 crc kubenswrapper[4932]: E1125 11:11:47.329745 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79a2503fa034e8e8e1ff28af186e0d965aa987e8ce65276caececb649758e00a\": container with ID starting with 79a2503fa034e8e8e1ff28af186e0d965aa987e8ce65276caececb649758e00a not found: ID does not exist" containerID="79a2503fa034e8e8e1ff28af186e0d965aa987e8ce65276caececb649758e00a"
Nov 25 11:11:47 crc kubenswrapper[4932]: I1125 11:11:47.329820 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79a2503fa034e8e8e1ff28af186e0d965aa987e8ce65276caececb649758e00a"} err="failed to get container status \"79a2503fa034e8e8e1ff28af186e0d965aa987e8ce65276caececb649758e00a\": rpc error: code = NotFound desc = could not find container \"79a2503fa034e8e8e1ff28af186e0d965aa987e8ce65276caececb649758e00a\": container with ID starting with 79a2503fa034e8e8e1ff28af186e0d965aa987e8ce65276caececb649758e00a not found: ID does not exist"
Nov 25 11:11:48 crc kubenswrapper[4932]: I1125 11:11:48.620552 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4521fea-13fc-4cef-a878-64291b273e7b" path="/var/lib/kubelet/pods/b4521fea-13fc-4cef-a878-64291b273e7b/volumes"
Nov 25 11:11:58 crc kubenswrapper[4932]: I1125 11:11:58.606054 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"
Nov 25 11:11:58 crc kubenswrapper[4932]: E1125 11:11:58.606933 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.662068 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 25 11:12:07 crc kubenswrapper[4932]: E1125 11:12:07.663323 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4521fea-13fc-4cef-a878-64291b273e7b" containerName="registry-server"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.663341 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4521fea-13fc-4cef-a878-64291b273e7b" containerName="registry-server"
Nov 25 11:12:07 crc kubenswrapper[4932]: E1125 11:12:07.663353 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4521fea-13fc-4cef-a878-64291b273e7b" containerName="extract-content"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.663360 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4521fea-13fc-4cef-a878-64291b273e7b" containerName="extract-content"
Nov 25 11:12:07 crc kubenswrapper[4932]: E1125 11:12:07.663394 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4521fea-13fc-4cef-a878-64291b273e7b" containerName="extract-utilities"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.663403 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4521fea-13fc-4cef-a878-64291b273e7b" containerName="extract-utilities"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.663691 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4521fea-13fc-4cef-a878-64291b273e7b" containerName="registry-server"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.664780 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.670932 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.673091 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.682272 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.740638 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.740903 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.842904 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.842969 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.843069 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.864077 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 11:12:07 crc kubenswrapper[4932]: I1125 11:12:07.996399 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 11:12:08 crc kubenswrapper[4932]: I1125 11:12:08.450175 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Nov 25 11:12:09 crc kubenswrapper[4932]: I1125 11:12:09.444413 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb","Type":"ContainerStarted","Data":"1654f491004cae1851610c43f7f620cd86e5fcd18bb5bb17da034fb2d1b06bb3"}
Nov 25 11:12:10 crc kubenswrapper[4932]: I1125 11:12:10.463416 4932 generic.go:334] "Generic (PLEG): container finished" podID="87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb" containerID="1438bfd24f1a02bac47273d4f45fe3a04e869f65d17ca2b7acb6fd4b4a2f4e16" exitCode=0
Nov 25 11:12:10 crc kubenswrapper[4932]: I1125 11:12:10.463471 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb","Type":"ContainerDied","Data":"1438bfd24f1a02bac47273d4f45fe3a04e869f65d17ca2b7acb6fd4b4a2f4e16"}
Nov 25 11:12:11 crc kubenswrapper[4932]: I1125 11:12:11.847170 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 11:12:11 crc kubenswrapper[4932]: I1125 11:12:11.936710 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb-kube-api-access\") pod \"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb\" (UID: \"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb\") "
Nov 25 11:12:11 crc kubenswrapper[4932]: I1125 11:12:11.937134 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb-kubelet-dir\") pod \"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb\" (UID: \"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb\") "
Nov 25 11:12:11 crc kubenswrapper[4932]: I1125 11:12:11.937455 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb" (UID: "87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 11:12:11 crc kubenswrapper[4932]: I1125 11:12:11.937851 4932 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 11:12:11 crc kubenswrapper[4932]: I1125 11:12:11.945149 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb" (UID: "87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.041376 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.483264 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb","Type":"ContainerDied","Data":"1654f491004cae1851610c43f7f620cd86e5fcd18bb5bb17da034fb2d1b06bb3"}
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.483320 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1654f491004cae1851610c43f7f620cd86e5fcd18bb5bb17da034fb2d1b06bb3"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.483364 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.655652 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 25 11:12:12 crc kubenswrapper[4932]: E1125 11:12:12.656506 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb" containerName="pruner"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.656535 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb" containerName="pruner"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.656870 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="87fb1310-bfd5-41f8-bdaa-a22be4b1c4eb" containerName="pruner"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.657953 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.659946 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.665551 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.670621 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.756328 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/09b35ad9-d552-4b4c-a28c-837961e2f44a-kubelet-dir\") pod \"installer-9-crc\" (UID: \"09b35ad9-d552-4b4c-a28c-837961e2f44a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.756461 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/09b35ad9-d552-4b4c-a28c-837961e2f44a-kube-api-access\") pod \"installer-9-crc\" (UID: \"09b35ad9-d552-4b4c-a28c-837961e2f44a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.756502 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/09b35ad9-d552-4b4c-a28c-837961e2f44a-var-lock\") pod \"installer-9-crc\" (UID: \"09b35ad9-d552-4b4c-a28c-837961e2f44a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.858855 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/09b35ad9-d552-4b4c-a28c-837961e2f44a-kube-api-access\") pod \"installer-9-crc\" (UID: \"09b35ad9-d552-4b4c-a28c-837961e2f44a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.859531 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/09b35ad9-d552-4b4c-a28c-837961e2f44a-var-lock\") pod \"installer-9-crc\" (UID: \"09b35ad9-d552-4b4c-a28c-837961e2f44a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.859610 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/09b35ad9-d552-4b4c-a28c-837961e2f44a-var-lock\") pod \"installer-9-crc\" (UID: \"09b35ad9-d552-4b4c-a28c-837961e2f44a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.859790 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/09b35ad9-d552-4b4c-a28c-837961e2f44a-kubelet-dir\") pod \"installer-9-crc\" (UID: \"09b35ad9-d552-4b4c-a28c-837961e2f44a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.859860 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/09b35ad9-d552-4b4c-a28c-837961e2f44a-kubelet-dir\") pod \"installer-9-crc\" (UID: \"09b35ad9-d552-4b4c-a28c-837961e2f44a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.877070 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/09b35ad9-d552-4b4c-a28c-837961e2f44a-kube-api-access\") pod \"installer-9-crc\" (UID: \"09b35ad9-d552-4b4c-a28c-837961e2f44a\") " pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 11:12:12 crc kubenswrapper[4932]: I1125 11:12:12.977267 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 25 11:12:13 crc kubenswrapper[4932]: I1125 11:12:13.484627 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Nov 25 11:12:13 crc kubenswrapper[4932]: I1125 11:12:13.500286 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"09b35ad9-d552-4b4c-a28c-837961e2f44a","Type":"ContainerStarted","Data":"4017c5f81677d17da069e52a9bb649e6c3e2d26a0962ee41f9790290e7e3555f"}
Nov 25 11:12:13 crc kubenswrapper[4932]: I1125 11:12:13.606622 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"
Nov 25 11:12:13 crc kubenswrapper[4932]: E1125 11:12:13.606972 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:12:14 crc kubenswrapper[4932]: I1125 11:12:14.511338 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"09b35ad9-d552-4b4c-a28c-837961e2f44a","Type":"ContainerStarted","Data":"df689441fbb87650d92d04f9230d0beffbd0b65bc16f7a17473603b193b34aec"}
Nov 25 11:12:14 crc kubenswrapper[4932]: I1125 11:12:14.532876 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.5328531439999997 podStartE2EDuration="2.532853144s" podCreationTimestamp="2025-11-25 11:12:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:12:14.523817795 +0000 UTC m=+8594.649847358" watchObservedRunningTime="2025-11-25 11:12:14.532853144 +0000 UTC m=+8594.658882707"
Nov 25 11:12:27 crc kubenswrapper[4932]: I1125 11:12:27.606768 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"
Nov 25 11:12:27 crc kubenswrapper[4932]: E1125 11:12:27.607717 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:12:40 crc kubenswrapper[4932]: I1125 11:12:40.612621 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f"
Nov 25 11:12:40 crc kubenswrapper[4932]: E1125 11:12:40.614551 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.582210 4932 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.584548 4932 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.584583 4932 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.584718 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.585495 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961" gracePeriod=15
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.585548 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a" gracePeriod=15
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.585583 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35" gracePeriod=15
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.585517 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb" gracePeriod=15
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.585571 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d" gracePeriod=15
Nov 25 11:12:51 crc kubenswrapper[4932]: E1125 11:12:51.586221 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586243 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 25 11:12:51 crc kubenswrapper[4932]: E1125 11:12:51.586257 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586264 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 25 11:12:51 crc kubenswrapper[4932]: E1125 11:12:51.586288 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586294 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 11:12:51 crc kubenswrapper[4932]: E1125 11:12:51.586308 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586314 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 25 11:12:51 crc kubenswrapper[4932]: E1125 11:12:51.586324 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586330 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 11:12:51 crc kubenswrapper[4932]: E1125 11:12:51.586352 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586359 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 25 11:12:51 crc kubenswrapper[4932]: E1125 11:12:51.586369 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586375 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586599 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586624 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586638 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586652 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586663 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.586682 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.589246 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.634974 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.635053 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.635078 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.635108 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.635251 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.635281 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.635419 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.635439 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.737818 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.738322 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.738258 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.738413 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.738424 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.738474 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.738583 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.738611 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.738694 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.738744 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.739813 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.739845 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.739886 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.739960 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.740121 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.740255 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.886067 4932 generic.go:334] "Generic (PLEG): container finished" podID="09b35ad9-d552-4b4c-a28c-837961e2f44a" containerID="df689441fbb87650d92d04f9230d0beffbd0b65bc16f7a17473603b193b34aec" exitCode=0
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.886141 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"09b35ad9-d552-4b4c-a28c-837961e2f44a","Type":"ContainerDied","Data":"df689441fbb87650d92d04f9230d0beffbd0b65bc16f7a17473603b193b34aec"}
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.886997 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.892225 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.894001 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.895993 4932 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d" exitCode=0
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.896017 4932 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb" exitCode=0
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.896024 4932 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a" exitCode=0
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.896031 4932 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35" exitCode=2
Nov 25 11:12:51 crc kubenswrapper[4932]: I1125 11:12:51.896077 4932 scope.go:117] "RemoveContainer" containerID="b5b5fd276c32712b98f5be96abcc3fdbdb105eed8400077a9fa55cbf04c8775c"
Nov 25 11:12:52 crc kubenswrapper[4932]: I1125 11:12:52.912052 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.307963 4932 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.309940 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.378239 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/09b35ad9-d552-4b4c-a28c-837961e2f44a-kube-api-access\") pod \"09b35ad9-d552-4b4c-a28c-837961e2f44a\" (UID: \"09b35ad9-d552-4b4c-a28c-837961e2f44a\") " Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.378534 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/09b35ad9-d552-4b4c-a28c-837961e2f44a-var-lock\") pod \"09b35ad9-d552-4b4c-a28c-837961e2f44a\" (UID: \"09b35ad9-d552-4b4c-a28c-837961e2f44a\") " Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.378620 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/09b35ad9-d552-4b4c-a28c-837961e2f44a-kubelet-dir\") pod \"09b35ad9-d552-4b4c-a28c-837961e2f44a\" (UID: \"09b35ad9-d552-4b4c-a28c-837961e2f44a\") " Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.378620 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/09b35ad9-d552-4b4c-a28c-837961e2f44a-var-lock" (OuterVolumeSpecName: "var-lock") pod "09b35ad9-d552-4b4c-a28c-837961e2f44a" (UID: "09b35ad9-d552-4b4c-a28c-837961e2f44a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.378641 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/09b35ad9-d552-4b4c-a28c-837961e2f44a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "09b35ad9-d552-4b4c-a28c-837961e2f44a" (UID: "09b35ad9-d552-4b4c-a28c-837961e2f44a"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.379096 4932 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/09b35ad9-d552-4b4c-a28c-837961e2f44a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.379113 4932 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/09b35ad9-d552-4b4c-a28c-837961e2f44a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.384422 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09b35ad9-d552-4b4c-a28c-837961e2f44a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "09b35ad9-d552-4b4c-a28c-837961e2f44a" (UID: "09b35ad9-d552-4b4c-a28c-837961e2f44a"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.481067 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/09b35ad9-d552-4b4c-a28c-837961e2f44a-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.925456 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"09b35ad9-d552-4b4c-a28c-837961e2f44a","Type":"ContainerDied","Data":"4017c5f81677d17da069e52a9bb649e6c3e2d26a0962ee41f9790290e7e3555f"} Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.926075 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4017c5f81677d17da069e52a9bb649e6c3e2d26a0962ee41f9790290e7e3555f" Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.925489 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 11:12:53 crc kubenswrapper[4932]: I1125 11:12:53.943424 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.428779 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.430127 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.431133 4932 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.431499 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.526564 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.526706 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.526731 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.526776 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.526923 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.526987 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.527452 4932 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.527472 4932 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.527481 4932 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.607119 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:12:54 crc kubenswrapper[4932]: E1125 11:12:54.607433 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.626325 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.944797 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.945822 4932 
generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961" exitCode=0 Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.945889 4932 scope.go:117] "RemoveContainer" containerID="79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.946061 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.947911 4932 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.948793 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.957101 4932 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.957725 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:12:54 crc kubenswrapper[4932]: I1125 11:12:54.976271 4932 scope.go:117] "RemoveContainer" containerID="8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.000771 4932 scope.go:117] "RemoveContainer" containerID="cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.030505 4932 scope.go:117] "RemoveContainer" containerID="9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.062908 4932 scope.go:117] "RemoveContainer" containerID="5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.116118 4932 scope.go:117] "RemoveContainer" containerID="d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.160271 4932 scope.go:117] "RemoveContainer" containerID="79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d" Nov 25 11:12:55 crc kubenswrapper[4932]: E1125 11:12:55.161132 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\": container with ID starting with 79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d not found: ID does not exist" 
containerID="79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.161163 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d"} err="failed to get container status \"79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\": rpc error: code = NotFound desc = could not find container \"79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d\": container with ID starting with 79e277d9732fd31a5730c85a585fa327a581ada87caa2f867283c91cb13bce3d not found: ID does not exist" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.161199 4932 scope.go:117] "RemoveContainer" containerID="8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb" Nov 25 11:12:55 crc kubenswrapper[4932]: E1125 11:12:55.161691 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\": container with ID starting with 8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb not found: ID does not exist" containerID="8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.161776 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb"} err="failed to get container status \"8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\": rpc error: code = NotFound desc = could not find container \"8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb\": container with ID starting with 8097c9729cf48189f16c16d2e90d5020abe7aa7848b8f5994e00b24addc37feb not found: ID does not exist" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.161839 4932 scope.go:117] "RemoveContainer" containerID="cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a" Nov 25 11:12:55 crc kubenswrapper[4932]: E1125 11:12:55.162224 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\": container with ID starting with cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a not found: ID does not exist" containerID="cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.162255 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a"} err="failed to get container status \"cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\": rpc error: code = NotFound desc = could not find container \"cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a\": container with ID starting with cd9e9c02db89017766295053c0327a74020d91a84198352cecda23fe7d65f88a not found: ID does not exist" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.162273 4932 scope.go:117] "RemoveContainer" containerID="9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35" Nov 25 11:12:55 crc kubenswrapper[4932]: E1125 11:12:55.162521 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\": container with ID starting with 9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35 not found: ID does not exist" containerID="9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.162549 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35"} err="failed to get container status \"9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\": rpc error: code = NotFound desc = could not find container \"9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35\": container with ID starting with 9b09746ab95d15c6a19a8999162c5ca477c98d735ee6d4b9faf2ec3fcb840d35 not found: ID does not exist" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.162568 4932 scope.go:117] "RemoveContainer" containerID="5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961" Nov 25 11:12:55 crc kubenswrapper[4932]: E1125 11:12:55.162858 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\": container with ID starting with 5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961 not found: ID does not exist" containerID="5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.162889 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961"} err="failed to get container status \"5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\": rpc error: code = NotFound desc = could not find container \"5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961\": container with ID starting with 5287bf2eecd5bbdcc01d049cdbfff92d08ff16f87a71c53210e35656ca697961 not found: ID does not exist" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.162907 4932 scope.go:117] "RemoveContainer" containerID="d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81" Nov 25 11:12:55 crc kubenswrapper[4932]: E1125 11:12:55.163403 4932 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\": container with ID starting with d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81 not found: ID does not exist" containerID="d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81" Nov 25 11:12:55 crc kubenswrapper[4932]: I1125 11:12:55.163458 4932 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81"} err="failed to get container status \"d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\": rpc error: code = NotFound desc = could not find container \"d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81\": container with ID starting with d892cfe4ae6315300e1a989ac3074b95a870f0492c4edd96b9fed16ba5e14e81 not found: ID does not exist" Nov 25 11:12:56 crc kubenswrapper[4932]: E1125 11:12:56.645412 4932 kubelet.go:1929] "Failed creating a mirror pod for" err="Post 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.77:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 11:12:56 crc kubenswrapper[4932]: I1125 11:12:56.646755 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 11:12:56 crc kubenswrapper[4932]: E1125 11:12:56.683779 4932 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.77:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b3b91db69f63d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 11:12:56.683132477 +0000 UTC m=+8636.809162050,LastTimestamp:2025-11-25 11:12:56.683132477 +0000 UTC m=+8636.809162050,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 11:12:56 crc kubenswrapper[4932]: I1125 11:12:56.967753 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"64c414f067d20e2bbdf4e7b74843f8be5df026a46b18741bfedce28a906f7262"} Nov 25 11:12:57 crc kubenswrapper[4932]: I1125 11:12:57.977484 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"8d9fa95132aa8985559983253b5d2d61094b2a502076f4a14c011df3a4ae9e86"} Nov 25 11:12:57 crc kubenswrapper[4932]: E1125 11:12:57.978091 4932 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.77:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 11:12:57 crc kubenswrapper[4932]: I1125 11:12:57.978286 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:12:58 crc kubenswrapper[4932]: I1125 11:12:58.465006 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="92b56b05-8133-4f01-a855-7bb7f523b38c" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 11:12:58 crc kubenswrapper[4932]: E1125 11:12:58.989465 4932 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 
38.102.83.77:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 11:12:59 crc kubenswrapper[4932]: E1125 11:12:59.695180 4932 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/ovndbcluster-sb-etc-ovn-ovsdbserver-sb-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/ovndbcluster-sb-etc-ovn-ovsdbserver-sb-0\": dial tcp 38.102.83.77:6443: connect: connection refused" pod="openstack/ovsdbserver-sb-0" volumeName="ovndbcluster-sb-etc-ovn" Nov 25 11:13:00 crc kubenswrapper[4932]: E1125 11:13:00.598156 4932 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:00 crc kubenswrapper[4932]: E1125 11:13:00.598838 4932 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:00 crc kubenswrapper[4932]: E1125 11:13:00.599076 4932 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:00 crc kubenswrapper[4932]: E1125 11:13:00.599388 4932 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:00 crc kubenswrapper[4932]: E1125 11:13:00.599692 4932 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:00 crc kubenswrapper[4932]: I1125 11:13:00.599737 4932 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 25 11:13:00 crc kubenswrapper[4932]: E1125 11:13:00.600079 4932 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" interval="200ms" Nov 25 11:13:00 crc kubenswrapper[4932]: I1125 11:13:00.614777 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:00 crc kubenswrapper[4932]: E1125 11:13:00.800805 4932 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" interval="400ms" Nov 25 11:13:01 crc kubenswrapper[4932]: E1125 11:13:01.201739 4932 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" interval="800ms" Nov 25 11:13:02 crc kubenswrapper[4932]: E1125 11:13:02.003424 4932 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" interval="1.6s" Nov 25 11:13:02 crc kubenswrapper[4932]: E1125 11:13:02.656716 4932 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openstack/mysql-db-openstack-galera-0: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack/persistentvolumeclaims/mysql-db-openstack-galera-0\": dial tcp 38.102.83.77:6443: connect: connection refused" pod="openstack/openstack-galera-0" volumeName="mysql-db" Nov 25 11:13:03 crc kubenswrapper[4932]: E1125 11:13:03.606099 4932 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.77:6443: connect: connection refused" interval="3.2s" Nov 25 11:13:03 crc kubenswrapper[4932]: E1125 11:13:03.679025 4932 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.77:6443: connect: connection refused" pod="openshift-image-registry/image-registry-66df7c8f76-js6tz" volumeName="registry-storage" Nov 25 11:13:04 crc kubenswrapper[4932]: I1125 11:13:04.040302 4932 generic.go:334] "Generic (PLEG): container finished" podID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" containerID="76a6ba4522af11237dab4ac8b9195c17bdf7234c5cf79f3c9640a6b9b084af9e" exitCode=1 Nov 25 11:13:04 crc kubenswrapper[4932]: I1125 11:13:04.040349 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" event={"ID":"b7db3ea2-66e6-46f2-93b4-4c8405a1b566","Type":"ContainerDied","Data":"76a6ba4522af11237dab4ac8b9195c17bdf7234c5cf79f3c9640a6b9b084af9e"} Nov 25 11:13:04 crc kubenswrapper[4932]: I1125 11:13:04.041108 4932 scope.go:117] "RemoveContainer" containerID="76a6ba4522af11237dab4ac8b9195c17bdf7234c5cf79f3c9640a6b9b084af9e" Nov 25 11:13:04 crc kubenswrapper[4932]: I1125 11:13:04.041291 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:04 crc kubenswrapper[4932]: I1125 11:13:04.041741 4932 status_manager.go:851] "Failed to get status for pod" podUID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-76dcd9496-2bqxl\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:04 crc kubenswrapper[4932]: I1125 11:13:04.936895 4932 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.054759 4932 generic.go:334] "Generic (PLEG): container finished" podID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" containerID="4f1001b7b9224e3afce6c4b710f1810a65e4769f19ad248c9bc6aee202143ed1" exitCode=1 Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.054834 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" event={"ID":"b7db3ea2-66e6-46f2-93b4-4c8405a1b566","Type":"ContainerDied","Data":"4f1001b7b9224e3afce6c4b710f1810a65e4769f19ad248c9bc6aee202143ed1"} Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.054901 4932 scope.go:117] "RemoveContainer" containerID="76a6ba4522af11237dab4ac8b9195c17bdf7234c5cf79f3c9640a6b9b084af9e" Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.055750 4932 scope.go:117] "RemoveContainer" containerID="4f1001b7b9224e3afce6c4b710f1810a65e4769f19ad248c9bc6aee202143ed1" Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.055856 4932 status_manager.go:851] "Failed to get status for pod" podUID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-76dcd9496-2bqxl\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.056159 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:05 crc kubenswrapper[4932]: E1125 11:13:05.056504 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-76dcd9496-2bqxl_metallb-system(b7db3ea2-66e6-46f2-93b4-4c8405a1b566)\"" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" podUID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" Nov 25 11:13:05 crc kubenswrapper[4932]: E1125 11:13:05.540047 4932 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.77:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b3b91db69f63d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 11:12:56.683132477 +0000 UTC m=+8636.809162050,LastTimestamp:2025-11-25 11:12:56.683132477 +0000 UTC m=+8636.809162050,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.605862 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.606920 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.607171 4932 status_manager.go:851] "Failed to get status for pod" podUID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-76dcd9496-2bqxl\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:05 crc kubenswrapper[4932]: E1125 11:13:05.607683 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.607720 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.623902 4932 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f3bab695-50e7-421a-a4ff-901ab01fd6e7" Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.623938 4932 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f3bab695-50e7-421a-a4ff-901ab01fd6e7" Nov 25 11:13:05 crc kubenswrapper[4932]: E1125 11:13:05.624463 4932 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:13:05 crc kubenswrapper[4932]: I1125 11:13:05.625202 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:13:05 crc kubenswrapper[4932]: W1125 11:13:05.652909 4932 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-c6fde22811508994f824818ca61010e479c93e197891ee391d3e9e0a23daba0d WatchSource:0}: Error finding container c6fde22811508994f824818ca61010e479c93e197891ee391d3e9e0a23daba0d: Status 404 returned error can't find the container with id c6fde22811508994f824818ca61010e479c93e197891ee391d3e9e0a23daba0d Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.069770 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.070167 4932 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79" exitCode=1 Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.070235 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79"} Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.070728 4932 scope.go:117] "RemoveContainer" containerID="ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.072287 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.072505 4932 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.072685 4932 status_manager.go:851] "Failed to get status for pod" podUID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-76dcd9496-2bqxl\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.076342 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.076557 4932 scope.go:117] "RemoveContainer" containerID="4f1001b7b9224e3afce6c4b710f1810a65e4769f19ad248c9bc6aee202143ed1" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.076611 4932 status_manager.go:851] "Failed to get status for pod" 
podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.076860 4932 status_manager.go:851] "Failed to get status for pod" podUID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-76dcd9496-2bqxl\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:06 crc kubenswrapper[4932]: E1125 11:13:06.076862 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-76dcd9496-2bqxl_metallb-system(b7db3ea2-66e6-46f2-93b4-4c8405a1b566)\"" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" podUID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.078745 4932 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="46e0012271c515d4e88c8e5171e51318084782da20d31992c223c9a0787a8e16" exitCode=0 Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.078779 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"46e0012271c515d4e88c8e5171e51318084782da20d31992c223c9a0787a8e16"} Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.078804 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c6fde22811508994f824818ca61010e479c93e197891ee391d3e9e0a23daba0d"} Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.079044 4932 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f3bab695-50e7-421a-a4ff-901ab01fd6e7" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.079062 4932 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f3bab695-50e7-421a-a4ff-901ab01fd6e7" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.079474 4932 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:06 crc kubenswrapper[4932]: E1125 11:13:06.079497 4932 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.079664 4932 status_manager.go:851] "Failed to get status for pod" podUID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-76dcd9496-2bqxl\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:06 crc kubenswrapper[4932]: I1125 11:13:06.079976 4932 status_manager.go:851] "Failed to get status for pod" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.77:6443: connect: connection refused" Nov 25 11:13:07 crc kubenswrapper[4932]: I1125 11:13:07.102161 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d2cdd88442287c666c03eee52f30f2e1b6b4b5b377fe04766db14df81d745e8c"} Nov 25 11:13:07 crc kubenswrapper[4932]: I1125 11:13:07.102736 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"fc4d31f0cc90c87ef090ecaf2582f682adc94119b64e8a516ea8477f78ed323a"} Nov 25 11:13:07 crc kubenswrapper[4932]: I1125 11:13:07.102753 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"ddd66bdf534041809d04ffc5184bb12bf0c77418179adc631b88f363c312a617"} Nov 25 11:13:07 crc kubenswrapper[4932]: I1125 11:13:07.107350 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 11:13:07 crc kubenswrapper[4932]: I1125 11:13:07.107422 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"c48febefcc9258f783b207adcbe934f8260825b0b936d2d1d85c4de7fcea5a99"} Nov 25 11:13:08 crc kubenswrapper[4932]: I1125 11:13:08.118448 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"431d6b65985fe70d2697b42f7fce2cae38e8c0cf6f79ad6f688590edeedee33a"} Nov 25 11:13:08 crc kubenswrapper[4932]: I1125 11:13:08.118798 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2cbe7deb9fc533ebf9a5a1c3fead8eb9b55ca8a81dfb5b4e25ee7b78a193e501"} Nov 25 11:13:08 crc kubenswrapper[4932]: I1125 11:13:08.118819 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:13:08 crc kubenswrapper[4932]: I1125 11:13:08.118810 4932 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f3bab695-50e7-421a-a4ff-901ab01fd6e7" Nov 25 11:13:08 crc kubenswrapper[4932]: I1125 11:13:08.118842 4932 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f3bab695-50e7-421a-a4ff-901ab01fd6e7" Nov 25 11:13:08 crc kubenswrapper[4932]: I1125 11:13:08.496740 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="92b56b05-8133-4f01-a855-7bb7f523b38c" 
containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 11:13:09 crc kubenswrapper[4932]: I1125 11:13:09.182024 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 11:13:10 crc kubenswrapper[4932]: I1125 11:13:10.626442 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:13:10 crc kubenswrapper[4932]: I1125 11:13:10.626818 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:13:10 crc kubenswrapper[4932]: I1125 11:13:10.633726 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:13:12 crc kubenswrapper[4932]: I1125 11:13:12.193474 4932 generic.go:334] "Generic (PLEG): container finished" podID="d4860edf-9f45-4dd2-8e35-7c3a4444370a" containerID="a82748d48467f19e2ec49734076e60948c2d035072487bd0205e49acac781dc4" exitCode=1 Nov 25 11:13:12 crc kubenswrapper[4932]: I1125 11:13:12.193554 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" event={"ID":"d4860edf-9f45-4dd2-8e35-7c3a4444370a","Type":"ContainerDied","Data":"a82748d48467f19e2ec49734076e60948c2d035072487bd0205e49acac781dc4"} Nov 25 11:13:12 crc kubenswrapper[4932]: I1125 11:13:12.194739 4932 scope.go:117] "RemoveContainer" containerID="a82748d48467f19e2ec49734076e60948c2d035072487bd0205e49acac781dc4" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.127465 4932 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.206406 4932 generic.go:334] "Generic (PLEG): container finished" podID="65fb5603-367e-431f-a8d3-0a3281a70361" containerID="a122f3f5d243279d93b07ffeaacd6c6d1cc73f4a0d88687eeaa2ef201d308db1" exitCode=1 Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.206494 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" event={"ID":"65fb5603-367e-431f-a8d3-0a3281a70361","Type":"ContainerDied","Data":"a122f3f5d243279d93b07ffeaacd6c6d1cc73f4a0d88687eeaa2ef201d308db1"} Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.207797 4932 scope.go:117] "RemoveContainer" containerID="a122f3f5d243279d93b07ffeaacd6c6d1cc73f4a0d88687eeaa2ef201d308db1" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.211016 4932 generic.go:334] "Generic (PLEG): container finished" podID="96d031ad-3550-4423-9422-93911c9a8217" containerID="e085beb3bbc4f9973ec8d9f647374f6a5724be783eb3ad3796a5cc06e35a1200" exitCode=1 Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.211109 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" event={"ID":"96d031ad-3550-4423-9422-93911c9a8217","Type":"ContainerDied","Data":"e085beb3bbc4f9973ec8d9f647374f6a5724be783eb3ad3796a5cc06e35a1200"} Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.212168 4932 scope.go:117] "RemoveContainer" containerID="e085beb3bbc4f9973ec8d9f647374f6a5724be783eb3ad3796a5cc06e35a1200" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.220583 4932 generic.go:334] "Generic (PLEG): container finished" podID="d4860edf-9f45-4dd2-8e35-7c3a4444370a" 
containerID="8deeb5e2a244df4ff2b4ea87d99ae84d521f8ac30b6eb7f0915a625194d7a8b1" exitCode=1 Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.220676 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" event={"ID":"d4860edf-9f45-4dd2-8e35-7c3a4444370a","Type":"ContainerDied","Data":"8deeb5e2a244df4ff2b4ea87d99ae84d521f8ac30b6eb7f0915a625194d7a8b1"} Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.220722 4932 scope.go:117] "RemoveContainer" containerID="a82748d48467f19e2ec49734076e60948c2d035072487bd0205e49acac781dc4" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.221957 4932 scope.go:117] "RemoveContainer" containerID="8deeb5e2a244df4ff2b4ea87d99ae84d521f8ac30b6eb7f0915a625194d7a8b1" Nov 25 11:13:13 crc kubenswrapper[4932]: E1125 11:13:13.222326 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-tkjb4_openstack-operators(d4860edf-9f45-4dd2-8e35-7c3a4444370a)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" podUID="d4860edf-9f45-4dd2-8e35-7c3a4444370a" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.224171 4932 generic.go:334] "Generic (PLEG): container finished" podID="bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd" containerID="6bbe52121c4b99bd1b164c8815329292b7368a45d866693592376057fe50d359" exitCode=1 Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.224261 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" event={"ID":"bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd","Type":"ContainerDied","Data":"6bbe52121c4b99bd1b164c8815329292b7368a45d866693592376057fe50d359"} Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.225084 4932 scope.go:117] "RemoveContainer" containerID="6bbe52121c4b99bd1b164c8815329292b7368a45d866693592376057fe50d359" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.228181 4932 generic.go:334] "Generic (PLEG): container finished" podID="af96b4c7-e9eb-4609-afab-ba3cc15f0a48" containerID="d76a49c47f51346babbace38abde4866130679b37e8786aa1c6aaf826f3af8b1" exitCode=1 Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.228286 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" event={"ID":"af96b4c7-e9eb-4609-afab-ba3cc15f0a48","Type":"ContainerDied","Data":"d76a49c47f51346babbace38abde4866130679b37e8786aa1c6aaf826f3af8b1"} Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.229376 4932 scope.go:117] "RemoveContainer" containerID="d76a49c47f51346babbace38abde4866130679b37e8786aa1c6aaf826f3af8b1" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.231920 4932 generic.go:334] "Generic (PLEG): container finished" podID="12f70ae4-14e2-4eed-9c1d-29e380a6d757" containerID="670b5f67265dd16f161391e043545e897bb14e0d60e0124adb4c2c0d4a01a98e" exitCode=1 Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.231995 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" event={"ID":"12f70ae4-14e2-4eed-9c1d-29e380a6d757","Type":"ContainerDied","Data":"670b5f67265dd16f161391e043545e897bb14e0d60e0124adb4c2c0d4a01a98e"} Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.232932 4932 scope.go:117] "RemoveContainer" 
containerID="670b5f67265dd16f161391e043545e897bb14e0d60e0124adb4c2c0d4a01a98e" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.234144 4932 generic.go:334] "Generic (PLEG): container finished" podID="dae34761-581e-4f65-8d7c-d6c2d302b4f7" containerID="1374e9de03345c43f46af7f7cde3289869915310569e550bc673641cdfb01c6d" exitCode=1 Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.234210 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" event={"ID":"dae34761-581e-4f65-8d7c-d6c2d302b4f7","Type":"ContainerDied","Data":"1374e9de03345c43f46af7f7cde3289869915310569e550bc673641cdfb01c6d"} Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.234611 4932 scope.go:117] "RemoveContainer" containerID="1374e9de03345c43f46af7f7cde3289869915310569e550bc673641cdfb01c6d" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.236509 4932 generic.go:334] "Generic (PLEG): container finished" podID="45ebb480-733b-47a3-a682-8fe0be16eb78" containerID="41061e6d0c38484f6b609924cf9a1c5a91be7d691bcc15aed58391a019a70189" exitCode=1 Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.236550 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" event={"ID":"45ebb480-733b-47a3-a682-8fe0be16eb78","Type":"ContainerDied","Data":"41061e6d0c38484f6b609924cf9a1c5a91be7d691bcc15aed58391a019a70189"} Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.236863 4932 scope.go:117] "RemoveContainer" containerID="41061e6d0c38484f6b609924cf9a1c5a91be7d691bcc15aed58391a019a70189" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.249967 4932 generic.go:334] "Generic (PLEG): container finished" podID="a92ad4a6-d922-45c1-b02d-f382b1ea1cc0" containerID="722efff839e6dcd8a6d64e9325b7e83a9e3c202123e0da6c24ae3f271cb1dfce" exitCode=1 Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.250106 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" event={"ID":"a92ad4a6-d922-45c1-b02d-f382b1ea1cc0","Type":"ContainerDied","Data":"722efff839e6dcd8a6d64e9325b7e83a9e3c202123e0da6c24ae3f271cb1dfce"} Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.259180 4932 scope.go:117] "RemoveContainer" containerID="722efff839e6dcd8a6d64e9325b7e83a9e3c202123e0da6c24ae3f271cb1dfce" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.289262 4932 generic.go:334] "Generic (PLEG): container finished" podID="3aadd9b8-da59-45e3-979b-ac4896561d6c" containerID="c8ab4115cd41bb1ce7e7d23045f95f7d210eaea00b7026a3c55d0d2c593dfa17" exitCode=1 Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.289412 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" event={"ID":"3aadd9b8-da59-45e3-979b-ac4896561d6c","Type":"ContainerDied","Data":"c8ab4115cd41bb1ce7e7d23045f95f7d210eaea00b7026a3c55d0d2c593dfa17"} Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.292526 4932 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f3bab695-50e7-421a-a4ff-901ab01fd6e7" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.292549 4932 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f3bab695-50e7-421a-a4ff-901ab01fd6e7" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.293122 4932 scope.go:117] "RemoveContainer" 
containerID="c8ab4115cd41bb1ce7e7d23045f95f7d210eaea00b7026a3c55d0d2c593dfa17" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.299595 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:13:13 crc kubenswrapper[4932]: I1125 11:13:13.435621 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="36f37df9-c41b-41cb-8afa-f7e92f1ec0a1" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.153860 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.154120 4932 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.154363 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.303836 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" event={"ID":"3aadd9b8-da59-45e3-979b-ac4896561d6c","Type":"ContainerStarted","Data":"66c2367047680c1bcc9ae80474b1517a8beb357b8ac327369abe55be05bcd736"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.304101 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.307272 4932 generic.go:334] "Generic (PLEG): container finished" podID="bde38973-f401-4917-8abc-08dafaf8f10c" containerID="72fadcec50c8804239125f2a46e1c8627697eca76284b96988eb75e0a322d238" exitCode=1 Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.307402 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" event={"ID":"bde38973-f401-4917-8abc-08dafaf8f10c","Type":"ContainerDied","Data":"72fadcec50c8804239125f2a46e1c8627697eca76284b96988eb75e0a322d238"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.308216 4932 scope.go:117] "RemoveContainer" containerID="72fadcec50c8804239125f2a46e1c8627697eca76284b96988eb75e0a322d238" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.310743 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" event={"ID":"96d031ad-3550-4423-9422-93911c9a8217","Type":"ContainerStarted","Data":"69afe2c47b7ffb15fcbeacc769b33e8ad1dbcb4063050ff08e9088ffbab1713e"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.310954 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.313171 4932 generic.go:334] "Generic (PLEG): container finished" 
podID="6fcca084-72cb-48ba-948f-6c4d861f6096" containerID="8d7f2ae2f105799891d00e8724d9b7be9af07072f86d9c09ab7398bdd31c6fcd" exitCode=1 Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.313229 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" event={"ID":"6fcca084-72cb-48ba-948f-6c4d861f6096","Type":"ContainerDied","Data":"8d7f2ae2f105799891d00e8724d9b7be9af07072f86d9c09ab7398bdd31c6fcd"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.313787 4932 scope.go:117] "RemoveContainer" containerID="8d7f2ae2f105799891d00e8724d9b7be9af07072f86d9c09ab7398bdd31c6fcd" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.319989 4932 generic.go:334] "Generic (PLEG): container finished" podID="243ff257-9836-4e43-9228-e05f18282650" containerID="620fdc212be8c877dc6a1da7ca0be213975cdbd9c958c8187bbb631b299eb7b0" exitCode=1 Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.320058 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" event={"ID":"243ff257-9836-4e43-9228-e05f18282650","Type":"ContainerDied","Data":"620fdc212be8c877dc6a1da7ca0be213975cdbd9c958c8187bbb631b299eb7b0"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.320873 4932 scope.go:117] "RemoveContainer" containerID="620fdc212be8c877dc6a1da7ca0be213975cdbd9c958c8187bbb631b299eb7b0" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.324707 4932 generic.go:334] "Generic (PLEG): container finished" podID="65fb5603-367e-431f-a8d3-0a3281a70361" containerID="db2d2d4cca139b24fe19a0750feaae79ed46b4e30cd3e137fa4990a5f8f36fae" exitCode=1 Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.324773 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" event={"ID":"65fb5603-367e-431f-a8d3-0a3281a70361","Type":"ContainerDied","Data":"db2d2d4cca139b24fe19a0750feaae79ed46b4e30cd3e137fa4990a5f8f36fae"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.324832 4932 scope.go:117] "RemoveContainer" containerID="a122f3f5d243279d93b07ffeaacd6c6d1cc73f4a0d88687eeaa2ef201d308db1" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.325709 4932 scope.go:117] "RemoveContainer" containerID="db2d2d4cca139b24fe19a0750feaae79ed46b4e30cd3e137fa4990a5f8f36fae" Nov 25 11:13:14 crc kubenswrapper[4932]: E1125 11:13:14.326073 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-jg9pn_openstack-operators(65fb5603-367e-431f-a8d3-0a3281a70361)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" podUID="65fb5603-367e-431f-a8d3-0a3281a70361" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.329025 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" event={"ID":"bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd","Type":"ContainerStarted","Data":"0df0b6e1d926aa7ad052b9bf1c62ef915500bbe1f2050a184b8b05caf5a1a124"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.329486 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.332107 4932 generic.go:334] "Generic (PLEG): container 
finished" podID="d2216d92-9e2d-4549-b634-63ec3ada9f14" containerID="fc9cb63539261e590f698e85996887fd7cac7bbff2609d3a62115b725ab712de" exitCode=1 Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.332283 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" event={"ID":"d2216d92-9e2d-4549-b634-63ec3ada9f14","Type":"ContainerDied","Data":"fc9cb63539261e590f698e85996887fd7cac7bbff2609d3a62115b725ab712de"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.332805 4932 scope.go:117] "RemoveContainer" containerID="fc9cb63539261e590f698e85996887fd7cac7bbff2609d3a62115b725ab712de" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.337852 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" event={"ID":"45ebb480-733b-47a3-a682-8fe0be16eb78","Type":"ContainerStarted","Data":"8bc6203648518a3b687346a4807cd4f2ae21ba51b9995343801a123d9b2742d6"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.339028 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.341140 4932 generic.go:334] "Generic (PLEG): container finished" podID="8c014265-53e2-4c4d-9c25-452686712f2e" containerID="edb768adb7ff4cf663301d1b8c996188c33db672b1babf3414708b15e6d1259d" exitCode=1 Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.341190 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" event={"ID":"8c014265-53e2-4c4d-9c25-452686712f2e","Type":"ContainerDied","Data":"edb768adb7ff4cf663301d1b8c996188c33db672b1babf3414708b15e6d1259d"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.342189 4932 scope.go:117] "RemoveContainer" containerID="edb768adb7ff4cf663301d1b8c996188c33db672b1babf3414708b15e6d1259d" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.345143 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" event={"ID":"a92ad4a6-d922-45c1-b02d-f382b1ea1cc0","Type":"ContainerStarted","Data":"d3206633dca19f19340f17b3188cb853f2f6351a89ec9fc90bca8ddf294928e7"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.345596 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.357498 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" event={"ID":"af96b4c7-e9eb-4609-afab-ba3cc15f0a48","Type":"ContainerStarted","Data":"cb14d4d6d55c9748f99327d7c03fdee060f3f08b2e093cc6c0d73a2f4825773a"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.358903 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.362493 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" event={"ID":"12f70ae4-14e2-4eed-9c1d-29e380a6d757","Type":"ContainerStarted","Data":"717bd6085399aed5ee34848934bc654dc64e91e981ca300f2881dc7b3fba2379"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.362955 4932 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.368263 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" event={"ID":"dae34761-581e-4f65-8d7c-d6c2d302b4f7","Type":"ContainerStarted","Data":"78b6b2bd71ed54beb3222ebf166814ae4c43cfe70040bd8c6759800db4fbe9ee"} Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.368917 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.369040 4932 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f3bab695-50e7-421a-a4ff-901ab01fd6e7" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.369086 4932 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f3bab695-50e7-421a-a4ff-901ab01fd6e7" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.936204 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 11:13:14 crc kubenswrapper[4932]: I1125 11:13:14.937376 4932 scope.go:117] "RemoveContainer" containerID="4f1001b7b9224e3afce6c4b710f1810a65e4769f19ad248c9bc6aee202143ed1" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.382599 4932 generic.go:334] "Generic (PLEG): container finished" podID="a92ad4a6-d922-45c1-b02d-f382b1ea1cc0" containerID="d3206633dca19f19340f17b3188cb853f2f6351a89ec9fc90bca8ddf294928e7" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.382697 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" event={"ID":"a92ad4a6-d922-45c1-b02d-f382b1ea1cc0","Type":"ContainerDied","Data":"d3206633dca19f19340f17b3188cb853f2f6351a89ec9fc90bca8ddf294928e7"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.382960 4932 scope.go:117] "RemoveContainer" containerID="722efff839e6dcd8a6d64e9325b7e83a9e3c202123e0da6c24ae3f271cb1dfce" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.383389 4932 scope.go:117] "RemoveContainer" containerID="d3206633dca19f19340f17b3188cb853f2f6351a89ec9fc90bca8ddf294928e7" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.383698 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-m2tpg_openstack-operators(a92ad4a6-d922-45c1-b02d-f382b1ea1cc0)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" podUID="a92ad4a6-d922-45c1-b02d-f382b1ea1cc0" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.386901 4932 generic.go:334] "Generic (PLEG): container finished" podID="e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d" containerID="dfb5b7ed45088edf33bf7ff338fb1a32825aceda9374ead836206731c65d6034" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.386959 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" event={"ID":"e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d","Type":"ContainerDied","Data":"dfb5b7ed45088edf33bf7ff338fb1a32825aceda9374ead836206731c65d6034"} 
Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.387613 4932 scope.go:117] "RemoveContainer" containerID="dfb5b7ed45088edf33bf7ff338fb1a32825aceda9374ead836206731c65d6034" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.388642 4932 generic.go:334] "Generic (PLEG): container finished" podID="1b5af146-d2d1-4526-8a10-84ebc35baca8" containerID="3f5b0313d6e96c29af56708f8b962ed783dbe6ab8c69c7a4e6d6b4f5f04ae7b1" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.388706 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" event={"ID":"1b5af146-d2d1-4526-8a10-84ebc35baca8","Type":"ContainerDied","Data":"3f5b0313d6e96c29af56708f8b962ed783dbe6ab8c69c7a4e6d6b4f5f04ae7b1"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.389102 4932 scope.go:117] "RemoveContainer" containerID="3f5b0313d6e96c29af56708f8b962ed783dbe6ab8c69c7a4e6d6b4f5f04ae7b1" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.391815 4932 generic.go:334] "Generic (PLEG): container finished" podID="243ff257-9836-4e43-9228-e05f18282650" containerID="1310bdba5d097d269e7743954f682bb124356c19b293e9ec1d7b616bda5011db" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.391876 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" event={"ID":"243ff257-9836-4e43-9228-e05f18282650","Type":"ContainerDied","Data":"1310bdba5d097d269e7743954f682bb124356c19b293e9ec1d7b616bda5011db"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.392418 4932 scope.go:117] "RemoveContainer" containerID="1310bdba5d097d269e7743954f682bb124356c19b293e9ec1d7b616bda5011db" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.392735 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-7c57c8bbc4-cwqvg_openstack-operators(243ff257-9836-4e43-9228-e05f18282650)\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" podUID="243ff257-9836-4e43-9228-e05f18282650" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.394548 4932 generic.go:334] "Generic (PLEG): container finished" podID="d2216d92-9e2d-4549-b634-63ec3ada9f14" containerID="2f3c15cefdb88949311914ce11f1d57b7f443ce681cb20aaeb6e617e8b8b6e3e" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.394604 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" event={"ID":"d2216d92-9e2d-4549-b634-63ec3ada9f14","Type":"ContainerDied","Data":"2f3c15cefdb88949311914ce11f1d57b7f443ce681cb20aaeb6e617e8b8b6e3e"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.395117 4932 scope.go:117] "RemoveContainer" containerID="2f3c15cefdb88949311914ce11f1d57b7f443ce681cb20aaeb6e617e8b8b6e3e" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.395438 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-774b86978c-bk2nv_openstack-operators(d2216d92-9e2d-4549-b634-63ec3ada9f14)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" podUID="d2216d92-9e2d-4549-b634-63ec3ada9f14" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.398481 4932 
generic.go:334] "Generic (PLEG): container finished" podID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" containerID="78202d006338b185ad300800279cc203ec2b3a603a818b3c1667f12a4c7afe7a" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.398909 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" event={"ID":"b7db3ea2-66e6-46f2-93b4-4c8405a1b566","Type":"ContainerDied","Data":"78202d006338b185ad300800279cc203ec2b3a603a818b3c1667f12a4c7afe7a"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.399694 4932 scope.go:117] "RemoveContainer" containerID="78202d006338b185ad300800279cc203ec2b3a603a818b3c1667f12a4c7afe7a" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.399938 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-76dcd9496-2bqxl_metallb-system(b7db3ea2-66e6-46f2-93b4-4c8405a1b566)\"" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" podUID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.402765 4932 generic.go:334] "Generic (PLEG): container finished" podID="45ebb480-733b-47a3-a682-8fe0be16eb78" containerID="8bc6203648518a3b687346a4807cd4f2ae21ba51b9995343801a123d9b2742d6" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.402827 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" event={"ID":"45ebb480-733b-47a3-a682-8fe0be16eb78","Type":"ContainerDied","Data":"8bc6203648518a3b687346a4807cd4f2ae21ba51b9995343801a123d9b2742d6"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.403628 4932 scope.go:117] "RemoveContainer" containerID="8bc6203648518a3b687346a4807cd4f2ae21ba51b9995343801a123d9b2742d6" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.403954 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-pkjjd_openstack-operators(45ebb480-733b-47a3-a682-8fe0be16eb78)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" podUID="45ebb480-733b-47a3-a682-8fe0be16eb78" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.410751 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" event={"ID":"8c014265-53e2-4c4d-9c25-452686712f2e","Type":"ContainerStarted","Data":"21f9b1e0cd09c8c481cfc9a920f21f304b5f643e0b4190b9ffbb40b0ca4fbba1"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.411976 4932 scope.go:117] "RemoveContainer" containerID="21f9b1e0cd09c8c481cfc9a920f21f304b5f643e0b4190b9ffbb40b0ca4fbba1" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.412371 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-4wph8_openstack-operators(8c014265-53e2-4c4d-9c25-452686712f2e)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" podUID="8c014265-53e2-4c4d-9c25-452686712f2e" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.433646 4932 generic.go:334] 
"Generic (PLEG): container finished" podID="af96b4c7-e9eb-4609-afab-ba3cc15f0a48" containerID="cb14d4d6d55c9748f99327d7c03fdee060f3f08b2e093cc6c0d73a2f4825773a" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.433765 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" event={"ID":"af96b4c7-e9eb-4609-afab-ba3cc15f0a48","Type":"ContainerDied","Data":"cb14d4d6d55c9748f99327d7c03fdee060f3f08b2e093cc6c0d73a2f4825773a"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.434650 4932 scope.go:117] "RemoveContainer" containerID="cb14d4d6d55c9748f99327d7c03fdee060f3f08b2e093cc6c0d73a2f4825773a" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.435228 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-66cf5c67ff-rcv5q_openstack-operators(af96b4c7-e9eb-4609-afab-ba3cc15f0a48)\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" podUID="af96b4c7-e9eb-4609-afab-ba3cc15f0a48" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.440420 4932 generic.go:334] "Generic (PLEG): container finished" podID="12f70ae4-14e2-4eed-9c1d-29e380a6d757" containerID="717bd6085399aed5ee34848934bc654dc64e91e981ca300f2881dc7b3fba2379" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.440490 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" event={"ID":"12f70ae4-14e2-4eed-9c1d-29e380a6d757","Type":"ContainerDied","Data":"717bd6085399aed5ee34848934bc654dc64e91e981ca300f2881dc7b3fba2379"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.441327 4932 scope.go:117] "RemoveContainer" containerID="717bd6085399aed5ee34848934bc654dc64e91e981ca300f2881dc7b3fba2379" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.441668 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-fjlpt_openstack-operators(12f70ae4-14e2-4eed-9c1d-29e380a6d757)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" podUID="12f70ae4-14e2-4eed-9c1d-29e380a6d757" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.447523 4932 generic.go:334] "Generic (PLEG): container finished" podID="bde38973-f401-4917-8abc-08dafaf8f10c" containerID="9b457159a21bb66dd3dd7116ff63bfa3461a21fd42462777c4194b0ca169a348" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.447628 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" event={"ID":"bde38973-f401-4917-8abc-08dafaf8f10c","Type":"ContainerDied","Data":"9b457159a21bb66dd3dd7116ff63bfa3461a21fd42462777c4194b0ca169a348"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.449642 4932 scope.go:117] "RemoveContainer" containerID="9b457159a21bb66dd3dd7116ff63bfa3461a21fd42462777c4194b0ca169a348" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.449987 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager 
pod=glance-operator-controller-manager-68b95954c9-x4l6r_openstack-operators(bde38973-f401-4917-8abc-08dafaf8f10c)\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" podUID="bde38973-f401-4917-8abc-08dafaf8f10c" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.455529 4932 generic.go:334] "Generic (PLEG): container finished" podID="070a395c-8ac5-4303-80fb-7f93282a9f99" containerID="c64b72dae063c07f045dfc23b33e2462f956b03d635d6f660129577b2d2e6066" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.455618 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" event={"ID":"070a395c-8ac5-4303-80fb-7f93282a9f99","Type":"ContainerDied","Data":"c64b72dae063c07f045dfc23b33e2462f956b03d635d6f660129577b2d2e6066"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.456498 4932 scope.go:117] "RemoveContainer" containerID="c64b72dae063c07f045dfc23b33e2462f956b03d635d6f660129577b2d2e6066" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.457642 4932 generic.go:334] "Generic (PLEG): container finished" podID="58b17ce6-9e76-4007-ac84-d59d6c3c38d2" containerID="74f7d8e83295f2bc7a2087b674e4f8a1e22c6cb43d6f4d1404ccdc4beccfca28" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.457728 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" event={"ID":"58b17ce6-9e76-4007-ac84-d59d6c3c38d2","Type":"ContainerDied","Data":"74f7d8e83295f2bc7a2087b674e4f8a1e22c6cb43d6f4d1404ccdc4beccfca28"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.458177 4932 scope.go:117] "RemoveContainer" containerID="74f7d8e83295f2bc7a2087b674e4f8a1e22c6cb43d6f4d1404ccdc4beccfca28" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.462167 4932 generic.go:334] "Generic (PLEG): container finished" podID="bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd" containerID="0df0b6e1d926aa7ad052b9bf1c62ef915500bbe1f2050a184b8b05caf5a1a124" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.462225 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" event={"ID":"bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd","Type":"ContainerDied","Data":"0df0b6e1d926aa7ad052b9bf1c62ef915500bbe1f2050a184b8b05caf5a1a124"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.463189 4932 scope.go:117] "RemoveContainer" containerID="0df0b6e1d926aa7ad052b9bf1c62ef915500bbe1f2050a184b8b05caf5a1a124" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.463533 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-t6t6s_openstack-operators(bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" podUID="bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.467671 4932 generic.go:334] "Generic (PLEG): container finished" podID="dae34761-581e-4f65-8d7c-d6c2d302b4f7" containerID="78b6b2bd71ed54beb3222ebf166814ae4c43cfe70040bd8c6759800db4fbe9ee" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.467752 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" 
event={"ID":"dae34761-581e-4f65-8d7c-d6c2d302b4f7","Type":"ContainerDied","Data":"78b6b2bd71ed54beb3222ebf166814ae4c43cfe70040bd8c6759800db4fbe9ee"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.468527 4932 scope.go:117] "RemoveContainer" containerID="78b6b2bd71ed54beb3222ebf166814ae4c43cfe70040bd8c6759800db4fbe9ee" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.468934 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-q7rt6_openstack-operators(dae34761-581e-4f65-8d7c-d6c2d302b4f7)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" podUID="dae34761-581e-4f65-8d7c-d6c2d302b4f7" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.473776 4932 generic.go:334] "Generic (PLEG): container finished" podID="765f296f-cd42-4f2c-9b21-2bcbc65d490c" containerID="aa560081a8fee703b3ea68394c811794ee09b408bd97475698ae11c8a3440f5e" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.473859 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" event={"ID":"765f296f-cd42-4f2c-9b21-2bcbc65d490c","Type":"ContainerDied","Data":"aa560081a8fee703b3ea68394c811794ee09b408bd97475698ae11c8a3440f5e"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.475947 4932 scope.go:117] "RemoveContainer" containerID="aa560081a8fee703b3ea68394c811794ee09b408bd97475698ae11c8a3440f5e" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.478469 4932 generic.go:334] "Generic (PLEG): container finished" podID="96d031ad-3550-4423-9422-93911c9a8217" containerID="69afe2c47b7ffb15fcbeacc769b33e8ad1dbcb4063050ff08e9088ffbab1713e" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.478867 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" event={"ID":"96d031ad-3550-4423-9422-93911c9a8217","Type":"ContainerDied","Data":"69afe2c47b7ffb15fcbeacc769b33e8ad1dbcb4063050ff08e9088ffbab1713e"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.479078 4932 scope.go:117] "RemoveContainer" containerID="69afe2c47b7ffb15fcbeacc769b33e8ad1dbcb4063050ff08e9088ffbab1713e" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.479373 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-ps52v_openstack-operators(96d031ad-3550-4423-9422-93911c9a8217)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" podUID="96d031ad-3550-4423-9422-93911c9a8217" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.485071 4932 generic.go:334] "Generic (PLEG): container finished" podID="66b0ef7a-14c8-4702-8e52-67809a677880" containerID="d67a4e59bfeba476e11186a332f647f8f5a5a7dd1d6445ad889dd9cdc0be3480" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.485138 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" event={"ID":"66b0ef7a-14c8-4702-8e52-67809a677880","Type":"ContainerDied","Data":"d67a4e59bfeba476e11186a332f647f8f5a5a7dd1d6445ad889dd9cdc0be3480"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.485940 4932 scope.go:117] 
"RemoveContainer" containerID="d67a4e59bfeba476e11186a332f647f8f5a5a7dd1d6445ad889dd9cdc0be3480" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.489399 4932 generic.go:334] "Generic (PLEG): container finished" podID="6fcca084-72cb-48ba-948f-6c4d861f6096" containerID="17e1be5cb241a41deb5ec7091aab21a54f05e52dd54efcffa68558d08521aaeb" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.489443 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" event={"ID":"6fcca084-72cb-48ba-948f-6c4d861f6096","Type":"ContainerDied","Data":"17e1be5cb241a41deb5ec7091aab21a54f05e52dd54efcffa68558d08521aaeb"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.489832 4932 scope.go:117] "RemoveContainer" containerID="17e1be5cb241a41deb5ec7091aab21a54f05e52dd54efcffa68558d08521aaeb" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.490075 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-phkzd_openstack-operators(6fcca084-72cb-48ba-948f-6c4d861f6096)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" podUID="6fcca084-72cb-48ba-948f-6c4d861f6096" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.491719 4932 generic.go:334] "Generic (PLEG): container finished" podID="6dedf441-145d-4642-a0f0-fb691d2edd2d" containerID="f9d685fb12af160c9b6f06cc5130b3243cdad011655cade2637093fe044c0f38" exitCode=1 Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.491989 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" event={"ID":"6dedf441-145d-4642-a0f0-fb691d2edd2d","Type":"ContainerDied","Data":"f9d685fb12af160c9b6f06cc5130b3243cdad011655cade2637093fe044c0f38"} Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.492782 4932 scope.go:117] "RemoveContainer" containerID="f9d685fb12af160c9b6f06cc5130b3243cdad011655cade2637093fe044c0f38" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.662292 4932 scope.go:117] "RemoveContainer" containerID="620fdc212be8c877dc6a1da7ca0be213975cdbd9c958c8187bbb631b299eb7b0" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.739865 4932 scope.go:117] "RemoveContainer" containerID="fc9cb63539261e590f698e85996887fd7cac7bbff2609d3a62115b725ab712de" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.808560 4932 scope.go:117] "RemoveContainer" containerID="4f1001b7b9224e3afce6c4b710f1810a65e4769f19ad248c9bc6aee202143ed1" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.836726 4932 scope.go:117] "RemoveContainer" containerID="41061e6d0c38484f6b609924cf9a1c5a91be7d691bcc15aed58391a019a70189" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.866696 4932 scope.go:117] "RemoveContainer" containerID="d76a49c47f51346babbace38abde4866130679b37e8786aa1c6aaf826f3af8b1" Nov 25 11:13:15 crc kubenswrapper[4932]: E1125 11:13:15.902369 4932 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6c508686_35cb_4c09_8ee6_2c655072d7d3.slice/crio-conmon-721d0fdc5df3659f4d894e79772aabcb99cd77528ae02221faa20f6c47d48bd2.scope\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8c014265_53e2_4c4d_9c25_452686712f2e.slice/crio-conmon-21f9b1e0cd09c8c481cfc9a920f21f304b5f643e0b4190b9ffbb40b0ca4fbba1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6c508686_35cb_4c09_8ee6_2c655072d7d3.slice/crio-721d0fdc5df3659f4d894e79772aabcb99cd77528ae02221faa20f6c47d48bd2.scope\": RecentStats: unable to find data in memory cache]" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.909147 4932 scope.go:117] "RemoveContainer" containerID="670b5f67265dd16f161391e043545e897bb14e0d60e0124adb4c2c0d4a01a98e" Nov 25 11:13:15 crc kubenswrapper[4932]: I1125 11:13:15.952700 4932 scope.go:117] "RemoveContainer" containerID="72fadcec50c8804239125f2a46e1c8627697eca76284b96988eb75e0a322d238" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.108281 4932 scope.go:117] "RemoveContainer" containerID="6bbe52121c4b99bd1b164c8815329292b7368a45d866693592376057fe50d359" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.161164 4932 scope.go:117] "RemoveContainer" containerID="1374e9de03345c43f46af7f7cde3289869915310569e550bc673641cdfb01c6d" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.197909 4932 scope.go:117] "RemoveContainer" containerID="e085beb3bbc4f9973ec8d9f647374f6a5724be783eb3ad3796a5cc06e35a1200" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.246498 4932 scope.go:117] "RemoveContainer" containerID="8d7f2ae2f105799891d00e8724d9b7be9af07072f86d9c09ab7398bdd31c6fcd" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.506069 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" event={"ID":"1b5af146-d2d1-4526-8a10-84ebc35baca8","Type":"ContainerStarted","Data":"5a30c98501b01fe8e778fbc0442397c643b58ac64e7618fe1570f411888ba896"} Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.511602 4932 scope.go:117] "RemoveContainer" containerID="78b6b2bd71ed54beb3222ebf166814ae4c43cfe70040bd8c6759800db4fbe9ee" Nov 25 11:13:16 crc kubenswrapper[4932]: E1125 11:13:16.512287 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-q7rt6_openstack-operators(dae34761-581e-4f65-8d7c-d6c2d302b4f7)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" podUID="dae34761-581e-4f65-8d7c-d6c2d302b4f7" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.514234 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" event={"ID":"765f296f-cd42-4f2c-9b21-2bcbc65d490c","Type":"ContainerStarted","Data":"da23984aafc361b5cf78fd93a16bb641b17848344a292b41d71ccbd50eedc9b8"} Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.514409 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.518688 4932 scope.go:117] "RemoveContainer" containerID="69afe2c47b7ffb15fcbeacc769b33e8ad1dbcb4063050ff08e9088ffbab1713e" Nov 25 11:13:16 crc kubenswrapper[4932]: E1125 11:13:16.518951 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager 
pod=barbican-operator-controller-manager-86dc4d89c8-ps52v_openstack-operators(96d031ad-3550-4423-9422-93911c9a8217)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" podUID="96d031ad-3550-4423-9422-93911c9a8217" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.523068 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" event={"ID":"6dedf441-145d-4642-a0f0-fb691d2edd2d","Type":"ContainerStarted","Data":"d0887b6d91e1075b08b890160d43a8859a50e7e6589e95b7f72ac08efa5f8dac"} Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.523539 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.526965 4932 scope.go:117] "RemoveContainer" containerID="717bd6085399aed5ee34848934bc654dc64e91e981ca300f2881dc7b3fba2379" Nov 25 11:13:16 crc kubenswrapper[4932]: E1125 11:13:16.527324 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-fjlpt_openstack-operators(12f70ae4-14e2-4eed-9c1d-29e380a6d757)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" podUID="12f70ae4-14e2-4eed-9c1d-29e380a6d757" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.533051 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" event={"ID":"070a395c-8ac5-4303-80fb-7f93282a9f99","Type":"ContainerStarted","Data":"8edade2dfc90e89f4af3626615b53f4f8b21225aab41ae194074f301680146e5"} Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.533286 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.537744 4932 generic.go:334] "Generic (PLEG): container finished" podID="695ce8a3-6a30-42a4-8ba2-f6309470362c" containerID="486c1f17e94d617e3e0daf3f196112ae9c290336e9a050cd7125e0ef942fc0f5" exitCode=1 Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.537816 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" event={"ID":"695ce8a3-6a30-42a4-8ba2-f6309470362c","Type":"ContainerDied","Data":"486c1f17e94d617e3e0daf3f196112ae9c290336e9a050cd7125e0ef942fc0f5"} Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.538650 4932 scope.go:117] "RemoveContainer" containerID="486c1f17e94d617e3e0daf3f196112ae9c290336e9a050cd7125e0ef942fc0f5" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.540836 4932 scope.go:117] "RemoveContainer" containerID="8bc6203648518a3b687346a4807cd4f2ae21ba51b9995343801a123d9b2742d6" Nov 25 11:13:16 crc kubenswrapper[4932]: E1125 11:13:16.541095 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-pkjjd_openstack-operators(45ebb480-733b-47a3-a682-8fe0be16eb78)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" podUID="45ebb480-733b-47a3-a682-8fe0be16eb78" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.542769 4932 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" event={"ID":"66b0ef7a-14c8-4702-8e52-67809a677880","Type":"ContainerStarted","Data":"0f4638b37c0633115650078c32626b083c4fe815278a256bdf196366c7ccc97c"} Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.543368 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.547943 4932 scope.go:117] "RemoveContainer" containerID="0df0b6e1d926aa7ad052b9bf1c62ef915500bbe1f2050a184b8b05caf5a1a124" Nov 25 11:13:16 crc kubenswrapper[4932]: E1125 11:13:16.548256 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-t6t6s_openstack-operators(bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" podUID="bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.549610 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" event={"ID":"e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d","Type":"ContainerStarted","Data":"77241dd472a7452e0c2309e9174d92e08cfa692c88e66603e190283ab32c6796"} Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.549836 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.556867 4932 scope.go:117] "RemoveContainer" containerID="cb14d4d6d55c9748f99327d7c03fdee060f3f08b2e093cc6c0d73a2f4825773a" Nov 25 11:13:16 crc kubenswrapper[4932]: E1125 11:13:16.557318 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-66cf5c67ff-rcv5q_openstack-operators(af96b4c7-e9eb-4609-afab-ba3cc15f0a48)\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" podUID="af96b4c7-e9eb-4609-afab-ba3cc15f0a48" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.559376 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" event={"ID":"58b17ce6-9e76-4007-ac84-d59d6c3c38d2","Type":"ContainerStarted","Data":"1cf7018aec97689f6bd5c7c8b486ad9a4c479cc8948a5905dabc6588f66b3890"} Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.559885 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.563831 4932 scope.go:117] "RemoveContainer" containerID="d3206633dca19f19340f17b3188cb853f2f6351a89ec9fc90bca8ddf294928e7" Nov 25 11:13:16 crc kubenswrapper[4932]: E1125 11:13:16.564269 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-m2tpg_openstack-operators(a92ad4a6-d922-45c1-b02d-f382b1ea1cc0)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" 
podUID="a92ad4a6-d922-45c1-b02d-f382b1ea1cc0" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.565285 4932 generic.go:334] "Generic (PLEG): container finished" podID="6c508686-35cb-4c09-8ee6-2c655072d7d3" containerID="721d0fdc5df3659f4d894e79772aabcb99cd77528ae02221faa20f6c47d48bd2" exitCode=1 Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.565339 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" event={"ID":"6c508686-35cb-4c09-8ee6-2c655072d7d3","Type":"ContainerDied","Data":"721d0fdc5df3659f4d894e79772aabcb99cd77528ae02221faa20f6c47d48bd2"} Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.565709 4932 scope.go:117] "RemoveContainer" containerID="721d0fdc5df3659f4d894e79772aabcb99cd77528ae02221faa20f6c47d48bd2" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.585973 4932 generic.go:334] "Generic (PLEG): container finished" podID="8c014265-53e2-4c4d-9c25-452686712f2e" containerID="21f9b1e0cd09c8c481cfc9a920f21f304b5f643e0b4190b9ffbb40b0ca4fbba1" exitCode=1 Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.586029 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" event={"ID":"8c014265-53e2-4c4d-9c25-452686712f2e","Type":"ContainerDied","Data":"21f9b1e0cd09c8c481cfc9a920f21f304b5f643e0b4190b9ffbb40b0ca4fbba1"} Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.586062 4932 scope.go:117] "RemoveContainer" containerID="edb768adb7ff4cf663301d1b8c996188c33db672b1babf3414708b15e6d1259d" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.587093 4932 scope.go:117] "RemoveContainer" containerID="21f9b1e0cd09c8c481cfc9a920f21f304b5f643e0b4190b9ffbb40b0ca4fbba1" Nov 25 11:13:16 crc kubenswrapper[4932]: E1125 11:13:16.587482 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-4wph8_openstack-operators(8c014265-53e2-4c4d-9c25-452686712f2e)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" podUID="8c014265-53e2-4c4d-9c25-452686712f2e" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.594280 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.595924 4932 scope.go:117] "RemoveContainer" containerID="2f3c15cefdb88949311914ce11f1d57b7f443ce681cb20aaeb6e617e8b8b6e3e" Nov 25 11:13:16 crc kubenswrapper[4932]: E1125 11:13:16.596278 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-774b86978c-bk2nv_openstack-operators(d2216d92-9e2d-4549-b634-63ec3ada9f14)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" podUID="d2216d92-9e2d-4549-b634-63ec3ada9f14" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.620728 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.621771 4932 scope.go:117] "RemoveContainer" containerID="db2d2d4cca139b24fe19a0750feaae79ed46b4e30cd3e137fa4990a5f8f36fae" Nov 25 
11:13:16 crc kubenswrapper[4932]: E1125 11:13:16.622098 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-jg9pn_openstack-operators(65fb5603-367e-431f-a8d3-0a3281a70361)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" podUID="65fb5603-367e-431f-a8d3-0a3281a70361" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.831708 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.832532 4932 scope.go:117] "RemoveContainer" containerID="9b457159a21bb66dd3dd7116ff63bfa3461a21fd42462777c4194b0ca169a348" Nov 25 11:13:16 crc kubenswrapper[4932]: E1125 11:13:16.832789 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-68b95954c9-x4l6r_openstack-operators(bde38973-f401-4917-8abc-08dafaf8f10c)\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" podUID="bde38973-f401-4917-8abc-08dafaf8f10c" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.927010 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" Nov 25 11:13:16 crc kubenswrapper[4932]: I1125 11:13:16.928081 4932 scope.go:117] "RemoveContainer" containerID="17e1be5cb241a41deb5ec7091aab21a54f05e52dd54efcffa68558d08521aaeb" Nov 25 11:13:16 crc kubenswrapper[4932]: E1125 11:13:16.928416 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-phkzd_openstack-operators(6fcca084-72cb-48ba-948f-6c4d861f6096)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" podUID="6fcca084-72cb-48ba-948f-6c4d861f6096" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.119380 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.233394 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.234273 4932 scope.go:117] "RemoveContainer" containerID="1310bdba5d097d269e7743954f682bb124356c19b293e9ec1d7b616bda5011db" Nov 25 11:13:17 crc kubenswrapper[4932]: E1125 11:13:17.234536 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-7c57c8bbc4-cwqvg_openstack-operators(243ff257-9836-4e43-9228-e05f18282650)\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" podUID="243ff257-9836-4e43-9228-e05f18282650" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.464582 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" Nov 25 11:13:17 crc 
kubenswrapper[4932]: I1125 11:13:17.465612 4932 scope.go:117] "RemoveContainer" containerID="8deeb5e2a244df4ff2b4ea87d99ae84d521f8ac30b6eb7f0915a625194d7a8b1" Nov 25 11:13:17 crc kubenswrapper[4932]: E1125 11:13:17.465913 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-tkjb4_openstack-operators(d4860edf-9f45-4dd2-8e35-7c3a4444370a)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" podUID="d4860edf-9f45-4dd2-8e35-7c3a4444370a" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.491630 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.597365 4932 generic.go:334] "Generic (PLEG): container finished" podID="1b5af146-d2d1-4526-8a10-84ebc35baca8" containerID="5a30c98501b01fe8e778fbc0442397c643b58ac64e7618fe1570f411888ba896" exitCode=1 Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.597420 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" event={"ID":"1b5af146-d2d1-4526-8a10-84ebc35baca8","Type":"ContainerDied","Data":"5a30c98501b01fe8e778fbc0442397c643b58ac64e7618fe1570f411888ba896"} Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.597488 4932 scope.go:117] "RemoveContainer" containerID="3f5b0313d6e96c29af56708f8b962ed783dbe6ab8c69c7a4e6d6b4f5f04ae7b1" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.598362 4932 scope.go:117] "RemoveContainer" containerID="5a30c98501b01fe8e778fbc0442397c643b58ac64e7618fe1570f411888ba896" Nov 25 11:13:17 crc kubenswrapper[4932]: E1125 11:13:17.598754 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-bwv87_openstack-operators(1b5af146-d2d1-4526-8a10-84ebc35baca8)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" podUID="1b5af146-d2d1-4526-8a10-84ebc35baca8" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.602095 4932 generic.go:334] "Generic (PLEG): container finished" podID="070a395c-8ac5-4303-80fb-7f93282a9f99" containerID="8edade2dfc90e89f4af3626615b53f4f8b21225aab41ae194074f301680146e5" exitCode=1 Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.602139 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" event={"ID":"070a395c-8ac5-4303-80fb-7f93282a9f99","Type":"ContainerDied","Data":"8edade2dfc90e89f4af3626615b53f4f8b21225aab41ae194074f301680146e5"} Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.602530 4932 scope.go:117] "RemoveContainer" containerID="8edade2dfc90e89f4af3626615b53f4f8b21225aab41ae194074f301680146e5" Nov 25 11:13:17 crc kubenswrapper[4932]: E1125 11:13:17.602783 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rm8qr_openstack-operators(070a395c-8ac5-4303-80fb-7f93282a9f99)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" podUID="070a395c-8ac5-4303-80fb-7f93282a9f99" 
Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.604219 4932 generic.go:334] "Generic (PLEG): container finished" podID="66b0ef7a-14c8-4702-8e52-67809a677880" containerID="0f4638b37c0633115650078c32626b083c4fe815278a256bdf196366c7ccc97c" exitCode=1 Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.604289 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" event={"ID":"66b0ef7a-14c8-4702-8e52-67809a677880","Type":"ContainerDied","Data":"0f4638b37c0633115650078c32626b083c4fe815278a256bdf196366c7ccc97c"} Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.605014 4932 scope.go:117] "RemoveContainer" containerID="0f4638b37c0633115650078c32626b083c4fe815278a256bdf196366c7ccc97c" Nov 25 11:13:17 crc kubenswrapper[4932]: E1125 11:13:17.605370 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-7cd5954d9-bdswv_openstack-operators(66b0ef7a-14c8-4702-8e52-67809a677880)\"" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" podUID="66b0ef7a-14c8-4702-8e52-67809a677880" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.606793 4932 generic.go:334] "Generic (PLEG): container finished" podID="e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d" containerID="77241dd472a7452e0c2309e9174d92e08cfa692c88e66603e190283ab32c6796" exitCode=1 Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.607116 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" event={"ID":"e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d","Type":"ContainerDied","Data":"77241dd472a7452e0c2309e9174d92e08cfa692c88e66603e190283ab32c6796"} Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.607849 4932 scope.go:117] "RemoveContainer" containerID="77241dd472a7452e0c2309e9174d92e08cfa692c88e66603e190283ab32c6796" Nov 25 11:13:17 crc kubenswrapper[4932]: E1125 11:13:17.608197 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-58bb8d67cc-bbmvf_openstack-operators(e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d)\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" podUID="e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.610002 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" event={"ID":"6c508686-35cb-4c09-8ee6-2c655072d7d3","Type":"ContainerStarted","Data":"e44fd0cdcccdd4d0d8814091b3c68395f4bc7aec14ddd9474784ed72ed699185"} Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.610289 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.614066 4932 generic.go:334] "Generic (PLEG): container finished" podID="6dedf441-145d-4642-a0f0-fb691d2edd2d" containerID="d0887b6d91e1075b08b890160d43a8859a50e7e6589e95b7f72ac08efa5f8dac" exitCode=1 Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.614159 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" event={"ID":"6dedf441-145d-4642-a0f0-fb691d2edd2d","Type":"ContainerDied","Data":"d0887b6d91e1075b08b890160d43a8859a50e7e6589e95b7f72ac08efa5f8dac"} Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.614573 4932 scope.go:117] "RemoveContainer" containerID="d0887b6d91e1075b08b890160d43a8859a50e7e6589e95b7f72ac08efa5f8dac" Nov 25 11:13:17 crc kubenswrapper[4932]: E1125 11:13:17.614888 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-b9l7b_openstack-operators(6dedf441-145d-4642-a0f0-fb691d2edd2d)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" podUID="6dedf441-145d-4642-a0f0-fb691d2edd2d" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.618848 4932 generic.go:334] "Generic (PLEG): container finished" podID="695ce8a3-6a30-42a4-8ba2-f6309470362c" containerID="79bd3780ca6b6ab2cc239b212a02253964baa33933f3692f3d4b77ba4ca66b08" exitCode=1 Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.618921 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" event={"ID":"695ce8a3-6a30-42a4-8ba2-f6309470362c","Type":"ContainerDied","Data":"79bd3780ca6b6ab2cc239b212a02253964baa33933f3692f3d4b77ba4ca66b08"} Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.620521 4932 scope.go:117] "RemoveContainer" containerID="79bd3780ca6b6ab2cc239b212a02253964baa33933f3692f3d4b77ba4ca66b08" Nov 25 11:13:17 crc kubenswrapper[4932]: E1125 11:13:17.621007 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-567f98c9d-blm28_openstack-operators(695ce8a3-6a30-42a4-8ba2-f6309470362c)\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" podUID="695ce8a3-6a30-42a4-8ba2-f6309470362c" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.624722 4932 generic.go:334] "Generic (PLEG): container finished" podID="765f296f-cd42-4f2c-9b21-2bcbc65d490c" containerID="da23984aafc361b5cf78fd93a16bb641b17848344a292b41d71ccbd50eedc9b8" exitCode=1 Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.624800 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" event={"ID":"765f296f-cd42-4f2c-9b21-2bcbc65d490c","Type":"ContainerDied","Data":"da23984aafc361b5cf78fd93a16bb641b17848344a292b41d71ccbd50eedc9b8"} Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.625484 4932 scope.go:117] "RemoveContainer" containerID="da23984aafc361b5cf78fd93a16bb641b17848344a292b41d71ccbd50eedc9b8" Nov 25 11:13:17 crc kubenswrapper[4932]: E1125 11:13:17.625767 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-d5cc86f4b-5bkct_openstack-operators(765f296f-cd42-4f2c-9b21-2bcbc65d490c)\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" podUID="765f296f-cd42-4f2c-9b21-2bcbc65d490c" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.628661 4932 scope.go:117] "RemoveContainer" 
containerID="21f9b1e0cd09c8c481cfc9a920f21f304b5f643e0b4190b9ffbb40b0ca4fbba1" Nov 25 11:13:17 crc kubenswrapper[4932]: E1125 11:13:17.628939 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-4wph8_openstack-operators(8c014265-53e2-4c4d-9c25-452686712f2e)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" podUID="8c014265-53e2-4c4d-9c25-452686712f2e" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.674709 4932 scope.go:117] "RemoveContainer" containerID="c64b72dae063c07f045dfc23b33e2462f956b03d635d6f660129577b2d2e6066" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.752930 4932 scope.go:117] "RemoveContainer" containerID="d67a4e59bfeba476e11186a332f647f8f5a5a7dd1d6445ad889dd9cdc0be3480" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.832664 4932 scope.go:117] "RemoveContainer" containerID="dfb5b7ed45088edf33bf7ff338fb1a32825aceda9374ead836206731c65d6034" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.899369 4932 scope.go:117] "RemoveContainer" containerID="f9d685fb12af160c9b6f06cc5130b3243cdad011655cade2637093fe044c0f38" Nov 25 11:13:17 crc kubenswrapper[4932]: I1125 11:13:17.973115 4932 scope.go:117] "RemoveContainer" containerID="486c1f17e94d617e3e0daf3f196112ae9c290336e9a050cd7125e0ef942fc0f5" Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.003770 4932 scope.go:117] "RemoveContainer" containerID="aa560081a8fee703b3ea68394c811794ee09b408bd97475698ae11c8a3440f5e" Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.465831 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/kube-state-metrics-0" podUID="92b56b05-8133-4f01-a855-7bb7f523b38c" containerName="kube-state-metrics" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.465902 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0" Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.466810 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-state-metrics" containerStatusID={"Type":"cri-o","ID":"8598d59fa4e850d8d77e7040939460516180bf95e4182af24e0dce8ca88dac92"} pod="openstack/kube-state-metrics-0" containerMessage="Container kube-state-metrics failed liveness probe, will be restarted" Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.466851 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="92b56b05-8133-4f01-a855-7bb7f523b38c" containerName="kube-state-metrics" containerID="cri-o://8598d59fa4e850d8d77e7040939460516180bf95e4182af24e0dce8ca88dac92" gracePeriod=30 Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.583495 4932 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.640289 4932 generic.go:334] "Generic (PLEG): container finished" podID="92b56b05-8133-4f01-a855-7bb7f523b38c" containerID="8598d59fa4e850d8d77e7040939460516180bf95e4182af24e0dce8ca88dac92" exitCode=2 Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.640376 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"92b56b05-8133-4f01-a855-7bb7f523b38c","Type":"ContainerDied","Data":"8598d59fa4e850d8d77e7040939460516180bf95e4182af24e0dce8ca88dac92"} Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.643121 4932 scope.go:117] "RemoveContainer" containerID="0f4638b37c0633115650078c32626b083c4fe815278a256bdf196366c7ccc97c" Nov 25 11:13:18 crc kubenswrapper[4932]: E1125 11:13:18.643527 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-7cd5954d9-bdswv_openstack-operators(66b0ef7a-14c8-4702-8e52-67809a677880)\"" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" podUID="66b0ef7a-14c8-4702-8e52-67809a677880" Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.647168 4932 scope.go:117] "RemoveContainer" containerID="d0887b6d91e1075b08b890160d43a8859a50e7e6589e95b7f72ac08efa5f8dac" Nov 25 11:13:18 crc kubenswrapper[4932]: E1125 11:13:18.647513 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-b9l7b_openstack-operators(6dedf441-145d-4642-a0f0-fb691d2edd2d)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" podUID="6dedf441-145d-4642-a0f0-fb691d2edd2d" Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.650237 4932 scope.go:117] "RemoveContainer" containerID="77241dd472a7452e0c2309e9174d92e08cfa692c88e66603e190283ab32c6796" Nov 25 11:13:18 crc kubenswrapper[4932]: E1125 11:13:18.650512 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-58bb8d67cc-bbmvf_openstack-operators(e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d)\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" podUID="e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d" Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.656618 4932 scope.go:117] "RemoveContainer" containerID="8edade2dfc90e89f4af3626615b53f4f8b21225aab41ae194074f301680146e5" Nov 25 11:13:18 crc kubenswrapper[4932]: E1125 11:13:18.656946 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rm8qr_openstack-operators(070a395c-8ac5-4303-80fb-7f93282a9f99)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" podUID="070a395c-8ac5-4303-80fb-7f93282a9f99" Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.660772 4932 scope.go:117] "RemoveContainer" containerID="79bd3780ca6b6ab2cc239b212a02253964baa33933f3692f3d4b77ba4ca66b08" Nov 25 11:13:18 crc kubenswrapper[4932]: E1125 11:13:18.661094 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-567f98c9d-blm28_openstack-operators(695ce8a3-6a30-42a4-8ba2-f6309470362c)\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" podUID="695ce8a3-6a30-42a4-8ba2-f6309470362c" Nov 25 11:13:18 crc kubenswrapper[4932]: I1125 11:13:18.663073 4932 scope.go:117] 
"RemoveContainer" containerID="da23984aafc361b5cf78fd93a16bb641b17848344a292b41d71ccbd50eedc9b8" Nov 25 11:13:18 crc kubenswrapper[4932]: E1125 11:13:18.663424 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-d5cc86f4b-5bkct_openstack-operators(765f296f-cd42-4f2c-9b21-2bcbc65d490c)\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" podUID="765f296f-cd42-4f2c-9b21-2bcbc65d490c" Nov 25 11:13:19 crc kubenswrapper[4932]: I1125 11:13:19.674398 4932 generic.go:334] "Generic (PLEG): container finished" podID="92b56b05-8133-4f01-a855-7bb7f523b38c" containerID="df2cc21d556a7a160bc36de917da54664709d74450d261f08448424393f55c72" exitCode=1 Nov 25 11:13:19 crc kubenswrapper[4932]: I1125 11:13:19.674455 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"92b56b05-8133-4f01-a855-7bb7f523b38c","Type":"ContainerDied","Data":"df2cc21d556a7a160bc36de917da54664709d74450d261f08448424393f55c72"} Nov 25 11:13:19 crc kubenswrapper[4932]: I1125 11:13:19.675352 4932 scope.go:117] "RemoveContainer" containerID="df2cc21d556a7a160bc36de917da54664709d74450d261f08448424393f55c72" Nov 25 11:13:19 crc kubenswrapper[4932]: I1125 11:13:19.675480 4932 scope.go:117] "RemoveContainer" containerID="8598d59fa4e850d8d77e7040939460516180bf95e4182af24e0dce8ca88dac92" Nov 25 11:13:20 crc kubenswrapper[4932]: I1125 11:13:20.643071 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:13:20 crc kubenswrapper[4932]: E1125 11:13:20.643992 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:13:20 crc kubenswrapper[4932]: I1125 11:13:20.688012 4932 generic.go:334] "Generic (PLEG): container finished" podID="92b56b05-8133-4f01-a855-7bb7f523b38c" containerID="17897f06fab4c172257a616b48b13b26ffaa2180c75f533e1a8b11f1804ed610" exitCode=1 Nov 25 11:13:20 crc kubenswrapper[4932]: I1125 11:13:20.688078 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"92b56b05-8133-4f01-a855-7bb7f523b38c","Type":"ContainerDied","Data":"17897f06fab4c172257a616b48b13b26ffaa2180c75f533e1a8b11f1804ed610"} Nov 25 11:13:20 crc kubenswrapper[4932]: I1125 11:13:20.688123 4932 scope.go:117] "RemoveContainer" containerID="df2cc21d556a7a160bc36de917da54664709d74450d261f08448424393f55c72" Nov 25 11:13:20 crc kubenswrapper[4932]: I1125 11:13:20.691329 4932 scope.go:117] "RemoveContainer" containerID="17897f06fab4c172257a616b48b13b26ffaa2180c75f533e1a8b11f1804ed610" Nov 25 11:13:20 crc kubenswrapper[4932]: E1125 11:13:20.692083 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(92b56b05-8133-4f01-a855-7bb7f523b38c)\"" pod="openstack/kube-state-metrics-0" podUID="92b56b05-8133-4f01-a855-7bb7f523b38c" Nov 25 11:13:20 crc kubenswrapper[4932]: 
I1125 11:13:20.715071 4932 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="36f37df9-c41b-41cb-8afa-f7e92f1ec0a1" Nov 25 11:13:21 crc kubenswrapper[4932]: I1125 11:13:21.701332 4932 scope.go:117] "RemoveContainer" containerID="17897f06fab4c172257a616b48b13b26ffaa2180c75f533e1a8b11f1804ed610" Nov 25 11:13:21 crc kubenswrapper[4932]: E1125 11:13:21.701939 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(92b56b05-8133-4f01-a855-7bb7f523b38c)\"" pod="openstack/kube-state-metrics-0" podUID="92b56b05-8133-4f01-a855-7bb7f523b38c" Nov 25 11:13:23 crc kubenswrapper[4932]: I1125 11:13:23.229313 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 11:13:23 crc kubenswrapper[4932]: I1125 11:13:23.360769 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 11:13:23 crc kubenswrapper[4932]: I1125 11:13:23.803021 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 11:13:23 crc kubenswrapper[4932]: I1125 11:13:23.815867 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 11:13:23 crc kubenswrapper[4932]: I1125 11:13:23.911949 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 11:13:23 crc kubenswrapper[4932]: I1125 11:13:23.987475 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.154334 4932 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.154391 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.161955 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-bm44s" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.321287 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.405412 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.523068 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.568812 4932 reflector.go:368] Caches 
populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-cqv7v" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.648967 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.723734 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-4g7km" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.735145 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.784352 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.817559 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.892055 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.903624 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.936546 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.937122 4932 scope.go:117] "RemoveContainer" containerID="78202d006338b185ad300800279cc203ec2b3a603a818b3c1667f12a4c7afe7a" Nov 25 11:13:24 crc kubenswrapper[4932]: E1125 11:13:24.937436 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-76dcd9496-2bqxl_metallb-system(b7db3ea2-66e6-46f2-93b4-4c8405a1b566)\"" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" podUID="b7db3ea2-66e6-46f2-93b4-4c8405a1b566" Nov 25 11:13:24 crc kubenswrapper[4932]: I1125 11:13:24.988763 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.002273 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.114579 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.134602 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.159127 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.264675 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.283458 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 11:13:25 crc 
kubenswrapper[4932]: I1125 11:13:25.289946 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.306730 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.520126 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-jfmk2" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.529103 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-nlzgm" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.534911 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.540071 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.544573 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.656055 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.807148 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.822393 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.835249 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.913760 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.968848 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.969398 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-zpw6x" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.977624 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 25 11:13:25 crc kubenswrapper[4932]: I1125 11:13:25.982797 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.120380 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"octavia-hmport-map" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.230077 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.283501 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.380125 4932 reflector.go:368] Caches populated for *v1.Secret 
from object-"openstack"/"octavia-rsyslog-scripts" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.407887 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.427885 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.460437 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.461372 4932 scope.go:117] "RemoveContainer" containerID="69afe2c47b7ffb15fcbeacc769b33e8ad1dbcb4063050ff08e9088ffbab1713e" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.481806 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.482522 4932 scope.go:117] "RemoveContainer" containerID="0df0b6e1d926aa7ad052b9bf1c62ef915500bbe1f2050a184b8b05caf5a1a124" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.500436 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.501342 4932 scope.go:117] "RemoveContainer" containerID="d3206633dca19f19340f17b3188cb853f2f6351a89ec9fc90bca8ddf294928e7" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.530790 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.557741 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.571259 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.594631 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.595768 4932 scope.go:117] "RemoveContainer" containerID="2f3c15cefdb88949311914ce11f1d57b7f443ce681cb20aaeb6e617e8b8b6e3e" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.606558 4932 scope.go:117] "RemoveContainer" containerID="8bc6203648518a3b687346a4807cd4f2ae21ba51b9995343801a123d9b2742d6" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.610605 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.631325 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.632208 4932 scope.go:117] "RemoveContainer" containerID="db2d2d4cca139b24fe19a0750feaae79ed46b4e30cd3e137fa4990a5f8f36fae" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.714945 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.723596 4932 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.727077 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.737478 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-wftr4" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.832111 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.833283 4932 scope.go:117] "RemoveContainer" containerID="9b457159a21bb66dd3dd7116ff63bfa3461a21fd42462777c4194b0ca169a348" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.864874 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.866228 4932 scope.go:117] "RemoveContainer" containerID="d0887b6d91e1075b08b890160d43a8859a50e7e6589e95b7f72ac08efa5f8dac" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.894997 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.895140 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.895963 4932 scope.go:117] "RemoveContainer" containerID="8edade2dfc90e89f4af3626615b53f4f8b21225aab41ae194074f301680146e5" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.896078 4932 scope.go:117] "RemoveContainer" containerID="717bd6085399aed5ee34848934bc654dc64e91e981ca300f2881dc7b3fba2379" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.926891 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.929533 4932 scope.go:117] "RemoveContainer" containerID="17e1be5cb241a41deb5ec7091aab21a54f05e52dd54efcffa68558d08521aaeb" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.931418 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.940348 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.940879 4932 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.941966 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-7mqtl" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.949961 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.955164 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.955243 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.958716 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.959642 4932 scope.go:117] "RemoveContainer" containerID="78b6b2bd71ed54beb3222ebf166814ae4c43cfe70040bd8c6759800db4fbe9ee" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.960974 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.962940 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.963750 4932 scope.go:117] "RemoveContainer" containerID="77241dd472a7452e0c2309e9174d92e08cfa692c88e66603e190283ab32c6796" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.980023 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 11:13:26 crc kubenswrapper[4932]: I1125 11:13:26.984109 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=13.984088041 podStartE2EDuration="13.984088041s" podCreationTimestamp="2025-11-25 11:13:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 11:13:26.976080621 +0000 UTC m=+8667.102110184" watchObservedRunningTime="2025-11-25 11:13:26.984088041 +0000 UTC m=+8667.110117604" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.032723 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.037686 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.038605 4932 scope.go:117] "RemoveContainer" containerID="cb14d4d6d55c9748f99327d7c03fdee060f3f08b2e093cc6c0d73a2f4825773a" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.118877 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.119832 4932 scope.go:117] "RemoveContainer" containerID="21f9b1e0cd09c8c481cfc9a920f21f304b5f643e0b4190b9ffbb40b0ca4fbba1" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.121789 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-djrc4" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.136787 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-fr24g" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.139910 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-d5glh" Nov 25 11:13:27 crc 
kubenswrapper[4932]: I1125 11:13:27.140413 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.186229 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.217679 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-config-data" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.222609 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.232473 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.233948 4932 scope.go:117] "RemoveContainer" containerID="1310bdba5d097d269e7743954f682bb124356c19b293e9ec1d7b616bda5011db" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.235538 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.245326 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.246612 4932 scope.go:117] "RemoveContainer" containerID="da23984aafc361b5cf78fd93a16bb641b17848344a292b41d71ccbd50eedc9b8" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.279448 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.280890 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.356827 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.360904 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.387461 4932 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-spbrq" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.395188 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.400082 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.425785 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.428295 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.441862 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 
11:13:27.464590 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.466604 4932 scope.go:117] "RemoveContainer" containerID="8deeb5e2a244df4ff2b4ea87d99ae84d521f8ac30b6eb7f0915a625194d7a8b1" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.475384 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.492000 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.492033 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.492755 4932 scope.go:117] "RemoveContainer" containerID="79bd3780ca6b6ab2cc239b212a02253964baa33933f3692f3d4b77ba4ca66b08" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.569625 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.581716 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cb74df96-cjzfx" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.587540 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.649530 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.654121 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.670890 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.673654 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-h5wsp" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.704744 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.707416 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.775665 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.776923 4932 generic.go:334] "Generic (PLEG): container finished" podID="bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd" containerID="9c220f165ee1f3c3ddc51a1485a6a95002a888bc55043315accf3f4d92519d3e" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.777003 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" 
event={"ID":"bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd","Type":"ContainerDied","Data":"9c220f165ee1f3c3ddc51a1485a6a95002a888bc55043315accf3f4d92519d3e"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.777229 4932 scope.go:117] "RemoveContainer" containerID="0df0b6e1d926aa7ad052b9bf1c62ef915500bbe1f2050a184b8b05caf5a1a124" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.778324 4932 scope.go:117] "RemoveContainer" containerID="9c220f165ee1f3c3ddc51a1485a6a95002a888bc55043315accf3f4d92519d3e" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.778870 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-t6t6s_openstack-operators(bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" podUID="bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.779724 4932 generic.go:334] "Generic (PLEG): container finished" podID="96d031ad-3550-4423-9422-93911c9a8217" containerID="6be6ce6f4fadd03ca1931f5c7ec31d8f935ad210f55914574ed58a70d4290f33" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.779800 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" event={"ID":"96d031ad-3550-4423-9422-93911c9a8217","Type":"ContainerDied","Data":"6be6ce6f4fadd03ca1931f5c7ec31d8f935ad210f55914574ed58a70d4290f33"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.780578 4932 scope.go:117] "RemoveContainer" containerID="6be6ce6f4fadd03ca1931f5c7ec31d8f935ad210f55914574ed58a70d4290f33" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.780873 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-ps52v_openstack-operators(96d031ad-3550-4423-9422-93911c9a8217)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" podUID="96d031ad-3550-4423-9422-93911c9a8217" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.785891 4932 generic.go:334] "Generic (PLEG): container finished" podID="6dedf441-145d-4642-a0f0-fb691d2edd2d" containerID="0b8fad67fbb4ad2b8197478c887396dd37068d0c413e4508f01b5df4df8b7ff8" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.785973 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" event={"ID":"6dedf441-145d-4642-a0f0-fb691d2edd2d","Type":"ContainerDied","Data":"0b8fad67fbb4ad2b8197478c887396dd37068d0c413e4508f01b5df4df8b7ff8"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.786753 4932 scope.go:117] "RemoveContainer" containerID="0b8fad67fbb4ad2b8197478c887396dd37068d0c413e4508f01b5df4df8b7ff8" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.788363 4932 generic.go:334] "Generic (PLEG): container finished" podID="bde38973-f401-4917-8abc-08dafaf8f10c" containerID="594e0fdeba523919767ce551fe3eaa2f10957060a840a23fe357121030cfff70" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.788542 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" 
event={"ID":"bde38973-f401-4917-8abc-08dafaf8f10c","Type":"ContainerDied","Data":"594e0fdeba523919767ce551fe3eaa2f10957060a840a23fe357121030cfff70"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.789335 4932 scope.go:117] "RemoveContainer" containerID="594e0fdeba523919767ce551fe3eaa2f10957060a840a23fe357121030cfff70" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.789602 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-68b95954c9-x4l6r_openstack-operators(bde38973-f401-4917-8abc-08dafaf8f10c)\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" podUID="bde38973-f401-4917-8abc-08dafaf8f10c" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.791517 4932 generic.go:334] "Generic (PLEG): container finished" podID="65fb5603-367e-431f-a8d3-0a3281a70361" containerID="2fe6efa081a343abbc63f3989472265ad235422b11ff49cb092994e153842585" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.791567 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" event={"ID":"65fb5603-367e-431f-a8d3-0a3281a70361","Type":"ContainerDied","Data":"2fe6efa081a343abbc63f3989472265ad235422b11ff49cb092994e153842585"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.792278 4932 scope.go:117] "RemoveContainer" containerID="2fe6efa081a343abbc63f3989472265ad235422b11ff49cb092994e153842585" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.792573 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-jg9pn_openstack-operators(65fb5603-367e-431f-a8d3-0a3281a70361)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" podUID="65fb5603-367e-431f-a8d3-0a3281a70361" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.793304 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-b9l7b_openstack-operators(6dedf441-145d-4642-a0f0-fb691d2edd2d)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" podUID="6dedf441-145d-4642-a0f0-fb691d2edd2d" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.799840 4932 generic.go:334] "Generic (PLEG): container finished" podID="e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d" containerID="47cd7872ffbfdde95460a0502578f2e60883297afc055bd2b7abdbbc78d9e15e" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.799951 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" event={"ID":"e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d","Type":"ContainerDied","Data":"47cd7872ffbfdde95460a0502578f2e60883297afc055bd2b7abdbbc78d9e15e"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.800768 4932 scope.go:117] "RemoveContainer" containerID="47cd7872ffbfdde95460a0502578f2e60883297afc055bd2b7abdbbc78d9e15e" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.801089 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s 
restarting failed container=manager pod=manila-operator-controller-manager-58bb8d67cc-bbmvf_openstack-operators(e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d)\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" podUID="e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.803694 4932 generic.go:334] "Generic (PLEG): container finished" podID="6fcca084-72cb-48ba-948f-6c4d861f6096" containerID="321d4419e8cad686272e3fceacb1607751d371e203ccd3a4a5675f0a1fc8cf13" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.803760 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" event={"ID":"6fcca084-72cb-48ba-948f-6c4d861f6096","Type":"ContainerDied","Data":"321d4419e8cad686272e3fceacb1607751d371e203ccd3a4a5675f0a1fc8cf13"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.804500 4932 scope.go:117] "RemoveContainer" containerID="321d4419e8cad686272e3fceacb1607751d371e203ccd3a4a5675f0a1fc8cf13" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.804781 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-phkzd_openstack-operators(6fcca084-72cb-48ba-948f-6c4d861f6096)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" podUID="6fcca084-72cb-48ba-948f-6c4d861f6096" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.806654 4932 generic.go:334] "Generic (PLEG): container finished" podID="dae34761-581e-4f65-8d7c-d6c2d302b4f7" containerID="ca1b590d0f66ab283d53143a55c1d064fd2eec81f8fc8a59c3fabeba9129fddb" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.806704 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" event={"ID":"dae34761-581e-4f65-8d7c-d6c2d302b4f7","Type":"ContainerDied","Data":"ca1b590d0f66ab283d53143a55c1d064fd2eec81f8fc8a59c3fabeba9129fddb"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.807086 4932 scope.go:117] "RemoveContainer" containerID="ca1b590d0f66ab283d53143a55c1d064fd2eec81f8fc8a59c3fabeba9129fddb" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.807385 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-q7rt6_openstack-operators(dae34761-581e-4f65-8d7c-d6c2d302b4f7)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" podUID="dae34761-581e-4f65-8d7c-d6c2d302b4f7" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.809941 4932 generic.go:334] "Generic (PLEG): container finished" podID="8c014265-53e2-4c4d-9c25-452686712f2e" containerID="4c8fd51ffda023a4309cbf1429435b6c532329159a36feebbcd8898e09f5e26c" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.809988 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" event={"ID":"8c014265-53e2-4c4d-9c25-452686712f2e","Type":"ContainerDied","Data":"4c8fd51ffda023a4309cbf1429435b6c532329159a36feebbcd8898e09f5e26c"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.810423 4932 scope.go:117] "RemoveContainer" 
containerID="4c8fd51ffda023a4309cbf1429435b6c532329159a36feebbcd8898e09f5e26c" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.810858 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-4wph8_openstack-operators(8c014265-53e2-4c4d-9c25-452686712f2e)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" podUID="8c014265-53e2-4c4d-9c25-452686712f2e" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.814156 4932 generic.go:334] "Generic (PLEG): container finished" podID="45ebb480-733b-47a3-a682-8fe0be16eb78" containerID="6e3f0bcabd684bef0f057b442d25c0f22b782e20ed02489a6197bad31126bb0a" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.814389 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" event={"ID":"45ebb480-733b-47a3-a682-8fe0be16eb78","Type":"ContainerDied","Data":"6e3f0bcabd684bef0f057b442d25c0f22b782e20ed02489a6197bad31126bb0a"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.814676 4932 scope.go:117] "RemoveContainer" containerID="6e3f0bcabd684bef0f057b442d25c0f22b782e20ed02489a6197bad31126bb0a" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.815284 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-pkjjd_openstack-operators(45ebb480-733b-47a3-a682-8fe0be16eb78)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" podUID="45ebb480-733b-47a3-a682-8fe0be16eb78" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.820937 4932 generic.go:334] "Generic (PLEG): container finished" podID="a92ad4a6-d922-45c1-b02d-f382b1ea1cc0" containerID="46e9334f161851ff2c82016c6fe298052cbbe11761a6447845774536bf5f8cb0" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.821023 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" event={"ID":"a92ad4a6-d922-45c1-b02d-f382b1ea1cc0","Type":"ContainerDied","Data":"46e9334f161851ff2c82016c6fe298052cbbe11761a6447845774536bf5f8cb0"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.821961 4932 scope.go:117] "RemoveContainer" containerID="46e9334f161851ff2c82016c6fe298052cbbe11761a6447845774536bf5f8cb0" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.822348 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-m2tpg_openstack-operators(a92ad4a6-d922-45c1-b02d-f382b1ea1cc0)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" podUID="a92ad4a6-d922-45c1-b02d-f382b1ea1cc0" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.827087 4932 generic.go:334] "Generic (PLEG): container finished" podID="af96b4c7-e9eb-4609-afab-ba3cc15f0a48" containerID="7197d1a1f02b016dc74d7997a6b26a25b0f9c10460d84e4ac28ae10ebfb271c4" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.827155 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" 
event={"ID":"af96b4c7-e9eb-4609-afab-ba3cc15f0a48","Type":"ContainerDied","Data":"7197d1a1f02b016dc74d7997a6b26a25b0f9c10460d84e4ac28ae10ebfb271c4"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.827818 4932 scope.go:117] "RemoveContainer" containerID="7197d1a1f02b016dc74d7997a6b26a25b0f9c10460d84e4ac28ae10ebfb271c4" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.828097 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-66cf5c67ff-rcv5q_openstack-operators(af96b4c7-e9eb-4609-afab-ba3cc15f0a48)\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" podUID="af96b4c7-e9eb-4609-afab-ba3cc15f0a48" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.842265 4932 generic.go:334] "Generic (PLEG): container finished" podID="12f70ae4-14e2-4eed-9c1d-29e380a6d757" containerID="6c84cbc7d3b16bab76c6590d21ef354cebbad8cfcb201a6c57bec2c6cf509ed2" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.842345 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" event={"ID":"12f70ae4-14e2-4eed-9c1d-29e380a6d757","Type":"ContainerDied","Data":"6c84cbc7d3b16bab76c6590d21ef354cebbad8cfcb201a6c57bec2c6cf509ed2"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.843140 4932 scope.go:117] "RemoveContainer" containerID="6c84cbc7d3b16bab76c6590d21ef354cebbad8cfcb201a6c57bec2c6cf509ed2" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.843450 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-fjlpt_openstack-operators(12f70ae4-14e2-4eed-9c1d-29e380a6d757)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" podUID="12f70ae4-14e2-4eed-9c1d-29e380a6d757" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.848026 4932 generic.go:334] "Generic (PLEG): container finished" podID="243ff257-9836-4e43-9228-e05f18282650" containerID="f146425e746d7c86d256614de1eb772b459a6ba29f81bc051e76f5ff8aab9e6d" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.848090 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" event={"ID":"243ff257-9836-4e43-9228-e05f18282650","Type":"ContainerDied","Data":"f146425e746d7c86d256614de1eb772b459a6ba29f81bc051e76f5ff8aab9e6d"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.848809 4932 scope.go:117] "RemoveContainer" containerID="f146425e746d7c86d256614de1eb772b459a6ba29f81bc051e76f5ff8aab9e6d" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.849070 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-7c57c8bbc4-cwqvg_openstack-operators(243ff257-9836-4e43-9228-e05f18282650)\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" podUID="243ff257-9836-4e43-9228-e05f18282650" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.853337 4932 generic.go:334] "Generic (PLEG): container finished" podID="d2216d92-9e2d-4549-b634-63ec3ada9f14" 
containerID="5698c0f6c7d8a2894640c72e0feabb127c272ffed7be3ff4537b11a55984e5c7" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.853416 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" event={"ID":"d2216d92-9e2d-4549-b634-63ec3ada9f14","Type":"ContainerDied","Data":"5698c0f6c7d8a2894640c72e0feabb127c272ffed7be3ff4537b11a55984e5c7"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.855118 4932 scope.go:117] "RemoveContainer" containerID="5698c0f6c7d8a2894640c72e0feabb127c272ffed7be3ff4537b11a55984e5c7" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.858278 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-774b86978c-bk2nv_openstack-operators(d2216d92-9e2d-4549-b634-63ec3ada9f14)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" podUID="d2216d92-9e2d-4549-b634-63ec3ada9f14" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.862048 4932 generic.go:334] "Generic (PLEG): container finished" podID="070a395c-8ac5-4303-80fb-7f93282a9f99" containerID="95bc213b96c2e40877abef34c4b3f3c23263937e72406cef1e77d8741f61515c" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.862129 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" event={"ID":"070a395c-8ac5-4303-80fb-7f93282a9f99","Type":"ContainerDied","Data":"95bc213b96c2e40877abef34c4b3f3c23263937e72406cef1e77d8741f61515c"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.862976 4932 scope.go:117] "RemoveContainer" containerID="95bc213b96c2e40877abef34c4b3f3c23263937e72406cef1e77d8741f61515c" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.863336 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rm8qr_openstack-operators(070a395c-8ac5-4303-80fb-7f93282a9f99)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" podUID="070a395c-8ac5-4303-80fb-7f93282a9f99" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.866552 4932 generic.go:334] "Generic (PLEG): container finished" podID="765f296f-cd42-4f2c-9b21-2bcbc65d490c" containerID="753ce2405a7150ab8cf76e7d80a19d6725906738039472a86f69449bcceabb6d" exitCode=1 Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.866655 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" event={"ID":"765f296f-cd42-4f2c-9b21-2bcbc65d490c","Type":"ContainerDied","Data":"753ce2405a7150ab8cf76e7d80a19d6725906738039472a86f69449bcceabb6d"} Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.867749 4932 scope.go:117] "RemoveContainer" containerID="753ce2405a7150ab8cf76e7d80a19d6725906738039472a86f69449bcceabb6d" Nov 25 11:13:27 crc kubenswrapper[4932]: E1125 11:13:27.868130 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-d5cc86f4b-5bkct_openstack-operators(765f296f-cd42-4f2c-9b21-2bcbc65d490c)\"" 
pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" podUID="765f296f-cd42-4f2c-9b21-2bcbc65d490c" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.878226 4932 scope.go:117] "RemoveContainer" containerID="69afe2c47b7ffb15fcbeacc769b33e8ad1dbcb4063050ff08e9088ffbab1713e" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.926942 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.942319 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 25 11:13:27 crc kubenswrapper[4932]: I1125 11:13:27.963559 4932 scope.go:117] "RemoveContainer" containerID="d0887b6d91e1075b08b890160d43a8859a50e7e6589e95b7f72ac08efa5f8dac" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.120964 4932 scope.go:117] "RemoveContainer" containerID="9b457159a21bb66dd3dd7116ff63bfa3461a21fd42462777c4194b0ca169a348" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.154166 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.164654 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.165711 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-q5zks" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.205760 4932 scope.go:117] "RemoveContainer" containerID="db2d2d4cca139b24fe19a0750feaae79ed46b4e30cd3e137fa4990a5f8f36fae" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.213309 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.230905 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.265543 4932 scope.go:117] "RemoveContainer" containerID="77241dd472a7452e0c2309e9174d92e08cfa692c88e66603e190283ab32c6796" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.299733 4932 scope.go:117] "RemoveContainer" containerID="17e1be5cb241a41deb5ec7091aab21a54f05e52dd54efcffa68558d08521aaeb" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.324846 4932 scope.go:117] "RemoveContainer" containerID="78b6b2bd71ed54beb3222ebf166814ae4c43cfe70040bd8c6759800db4fbe9ee" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.352840 4932 scope.go:117] "RemoveContainer" containerID="21f9b1e0cd09c8c481cfc9a920f21f304b5f643e0b4190b9ffbb40b0ca4fbba1" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.380637 4932 scope.go:117] "RemoveContainer" containerID="8bc6203648518a3b687346a4807cd4f2ae21ba51b9995343801a123d9b2742d6" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.406709 4932 scope.go:117] "RemoveContainer" containerID="d3206633dca19f19340f17b3188cb853f2f6351a89ec9fc90bca8ddf294928e7" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.420144 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.432598 4932 scope.go:117] "RemoveContainer" containerID="cb14d4d6d55c9748f99327d7c03fdee060f3f08b2e093cc6c0d73a2f4825773a" Nov 25 11:13:28 crc 
kubenswrapper[4932]: I1125 11:13:28.458915 4932 scope.go:117] "RemoveContainer" containerID="717bd6085399aed5ee34848934bc654dc64e91e981ca300f2881dc7b3fba2379" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.459282 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.459630 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/kube-state-metrics-0" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.460081 4932 scope.go:117] "RemoveContainer" containerID="17897f06fab4c172257a616b48b13b26ffaa2180c75f533e1a8b11f1804ed610" Nov 25 11:13:28 crc kubenswrapper[4932]: E1125 11:13:28.460537 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(92b56b05-8133-4f01-a855-7bb7f523b38c)\"" pod="openstack/kube-state-metrics-0" podUID="92b56b05-8133-4f01-a855-7bb7f523b38c" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.477152 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.485919 4932 scope.go:117] "RemoveContainer" containerID="1310bdba5d097d269e7743954f682bb124356c19b293e9ec1d7b616bda5011db" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.494249 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.520961 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-kmtcm" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.523530 4932 scope.go:117] "RemoveContainer" containerID="2f3c15cefdb88949311914ce11f1d57b7f443ce681cb20aaeb6e617e8b8b6e3e" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.530125 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.568769 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.620698 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.632389 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-scripts" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.650165 4932 scope.go:117] "RemoveContainer" containerID="8edade2dfc90e89f4af3626615b53f4f8b21225aab41ae194074f301680146e5" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.655706 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.659830 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.662947 4932 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.689125 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.690147 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.721225 4932 scope.go:117] "RemoveContainer" containerID="da23984aafc361b5cf78fd93a16bb641b17848344a292b41d71ccbd50eedc9b8" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.729276 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.806735 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.828516 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.833534 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.868462 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.878312 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.891397 4932 scope.go:117] "RemoveContainer" containerID="6e3f0bcabd684bef0f057b442d25c0f22b782e20ed02489a6197bad31126bb0a" Nov 25 11:13:28 crc kubenswrapper[4932]: E1125 11:13:28.891664 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-pkjjd_openstack-operators(45ebb480-733b-47a3-a682-8fe0be16eb78)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" podUID="45ebb480-733b-47a3-a682-8fe0be16eb78" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.893793 4932 generic.go:334] "Generic (PLEG): container finished" podID="d4860edf-9f45-4dd2-8e35-7c3a4444370a" containerID="4565b24aaac5b06b87d85cb6970b4ed78a8f21a3363de55c7f178311e200c9a3" exitCode=1 Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.893879 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" event={"ID":"d4860edf-9f45-4dd2-8e35-7c3a4444370a","Type":"ContainerDied","Data":"4565b24aaac5b06b87d85cb6970b4ed78a8f21a3363de55c7f178311e200c9a3"} Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.893930 4932 scope.go:117] "RemoveContainer" containerID="8deeb5e2a244df4ff2b4ea87d99ae84d521f8ac30b6eb7f0915a625194d7a8b1" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.895078 4932 scope.go:117] "RemoveContainer" containerID="4565b24aaac5b06b87d85cb6970b4ed78a8f21a3363de55c7f178311e200c9a3" Nov 25 11:13:28 crc kubenswrapper[4932]: E1125 11:13:28.895439 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager 
pod=swift-operator-controller-manager-6fdc4fcf86-tkjb4_openstack-operators(d4860edf-9f45-4dd2-8e35-7c3a4444370a)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" podUID="d4860edf-9f45-4dd2-8e35-7c3a4444370a" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.899494 4932 generic.go:334] "Generic (PLEG): container finished" podID="695ce8a3-6a30-42a4-8ba2-f6309470362c" containerID="5e49d66515e26860a98b9b2132fd198f0f788a7552146e7ddb3fe047848e46c0" exitCode=1 Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.899584 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" event={"ID":"695ce8a3-6a30-42a4-8ba2-f6309470362c","Type":"ContainerDied","Data":"5e49d66515e26860a98b9b2132fd198f0f788a7552146e7ddb3fe047848e46c0"} Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.901523 4932 scope.go:117] "RemoveContainer" containerID="5e49d66515e26860a98b9b2132fd198f0f788a7552146e7ddb3fe047848e46c0" Nov 25 11:13:28 crc kubenswrapper[4932]: E1125 11:13:28.902050 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-567f98c9d-blm28_openstack-operators(695ce8a3-6a30-42a4-8ba2-f6309470362c)\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" podUID="695ce8a3-6a30-42a4-8ba2-f6309470362c" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.914870 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.915068 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.924323 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-zh4cp" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.956851 4932 scope.go:117] "RemoveContainer" containerID="17897f06fab4c172257a616b48b13b26ffaa2180c75f533e1a8b11f1804ed610" Nov 25 11:13:28 crc kubenswrapper[4932]: E1125 11:13:28.957124 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-state-metrics pod=kube-state-metrics-0_openstack(92b56b05-8133-4f01-a855-7bb7f523b38c)\"" pod="openstack/kube-state-metrics-0" podUID="92b56b05-8133-4f01-a855-7bb7f523b38c" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.965741 4932 scope.go:117] "RemoveContainer" containerID="79bd3780ca6b6ab2cc239b212a02253964baa33933f3692f3d4b77ba4ca66b08" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.995115 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 11:13:28 crc kubenswrapper[4932]: I1125 11:13:28.995352 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.003891 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-s7mw2" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.039852 4932 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-lrcw4" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.064000 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.064033 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.066841 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.091052 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.154591 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-tckm9" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.160398 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-certs-secret" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.169960 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.189963 4932 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.191396 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-2c5dk" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.193477 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.219709 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.230047 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.277462 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-2zs26" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.312872 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-xq42j" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.375638 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.380690 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.400415 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.437534 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.445726 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 
11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.446747 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.480865 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.485561 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.496689 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.501621 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.508351 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.511620 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.529123 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-7b567956b5-842jr" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.547281 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-wzvrk" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.576177 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-dvb8s" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.591853 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.596579 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.617229 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.655580 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.661451 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.686201 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.693618 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.765704 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-hgw57" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.782691 4932 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-9dnd9" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.802471 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.802547 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.823249 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.886298 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.891639 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.927387 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Nov 25 11:13:29 crc kubenswrapper[4932]: I1125 11:13:29.970318 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.013051 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.033789 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.067748 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.072536 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.080817 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.081328 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.088175 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.102662 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.142858 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.178588 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.178980 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.193889 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.219186 4932 reflector.go:368] Caches populated for 
*v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.236987 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.270897 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-scripts" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.275212 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.275443 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.288656 4932 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-7xdst" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.313953 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.531725 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.601042 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.619074 4932 scope.go:117] "RemoveContainer" containerID="5a30c98501b01fe8e778fbc0442397c643b58ac64e7618fe1570f411888ba896" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.661268 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-gr8tp" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.671349 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-b58f89467-lmss6" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.687247 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.703565 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.706603 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.713670 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.717789 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.732866 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-config-data" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.755597 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.764723 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Nov 25 11:13:30 crc kubenswrapper[4932]: 
I1125 11:13:30.769290 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.819073 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.840575 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.950486 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-8h5xh" Nov 25 11:13:30 crc kubenswrapper[4932]: I1125 11:13:30.983312 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-bwv87" event={"ID":"1b5af146-d2d1-4526-8a10-84ebc35baca8","Type":"ContainerStarted","Data":"efda211215631215dbfd9ecdb4972c508a2af796475b5f9f4d13a45dfdcdc547"} Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.003986 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.004883 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.013310 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.015746 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.038533 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.105793 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.114773 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.120784 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.146517 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.201639 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.225111 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-scripts" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.251851 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.256407 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.257205 4932 scope.go:117] "RemoveContainer" containerID="0f4638b37c0633115650078c32626b083c4fe815278a256bdf196366c7ccc97c" Nov 25 11:13:31 
crc kubenswrapper[4932]: I1125 11:13:31.264966 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.409795 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-scripts" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.431049 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.452370 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-8l9k2" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.452399 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.464909 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.468161 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.470430 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-rm8hg" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.525106 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.579949 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.585488 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.607314 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:13:31 crc kubenswrapper[4932]: E1125 11:13:31.607678 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.638026 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.639287 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.691980 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.696580 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.723765 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.750821 4932 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.776441 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.808303 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.841956 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.860046 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.900050 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.936382 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.977125 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.994954 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" event={"ID":"66b0ef7a-14c8-4702-8e52-67809a677880","Type":"ContainerStarted","Data":"63f428c8f1a5e1c9e8e2a2e03698475d90942df3300973dde731563df535e306"} Nov 25 11:13:31 crc kubenswrapper[4932]: I1125 11:13:31.995176 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.013488 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-kff9b" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.063069 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.119721 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.119748 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.127642 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-jt78v" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.157761 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.167876 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.177306 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.182963 4932 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.247639 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.290036 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-zs62z" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.308385 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.309535 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.316711 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.334334 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.426434 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.435712 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.456115 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.512292 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.587303 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-s2fkz" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.619040 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-rwzlp" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.646562 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.657788 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.659345 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.688874 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.705771 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.766937 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-dgtlx" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.769694 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-tmn25" Nov 25 11:13:32 
crc kubenswrapper[4932]: I1125 11:13:32.783918 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.793536 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.799268 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.800007 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.872725 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.896112 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.911224 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.937695 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.950977 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.965009 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.982580 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 11:13:32 crc kubenswrapper[4932]: I1125 11:13:32.989788 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.000656 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.021752 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.075724 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-zg8db" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.090777 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.100813 4932 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.165715 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.171384 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.176689 4932 reflector.go:368] 
Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-275wq" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.208632 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.255388 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.281609 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.296268 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.328217 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.394293 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.394702 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.415785 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.417017 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-mx44d" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.426227 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.522030 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.523816 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.538353 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-56bmt" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.560617 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.599926 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.653279 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-7lfkp" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.659618 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.673853 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-9kmsl" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.675136 4932 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-nmstate"/"plugin-serving-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.684025 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.687741 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.697968 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.728905 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.728904 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.759241 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.764496 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.775755 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.819346 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.831436 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-config-data" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.833569 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-public-svc" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.839446 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.862332 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.864060 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.875913 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.877437 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.913876 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-nnbrb" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.928796 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.951687 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.971575 4932 reflector.go:368] 
Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 25 11:13:33 crc kubenswrapper[4932]: I1125 11:13:33.987743 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.012421 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.025371 4932 generic.go:334] "Generic (PLEG): container finished" podID="6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93" containerID="b4c4559bceccf81fdaab6f6b7cfb015be19956e6d2f5f89073fd72d3f0b4e368" exitCode=1 Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.025422 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j" event={"ID":"6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93","Type":"ContainerDied","Data":"b4c4559bceccf81fdaab6f6b7cfb015be19956e6d2f5f89073fd72d3f0b4e368"} Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.026153 4932 scope.go:117] "RemoveContainer" containerID="b4c4559bceccf81fdaab6f6b7cfb015be19956e6d2f5f89073fd72d3f0b4e368" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.041375 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-x64ll" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.044153 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.065044 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-sxl4j" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.077140 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.109984 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.146948 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.153466 4932 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.153512 4932 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.153565 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.154562 4932 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" 
containerStatusID={"Type":"cri-o","ID":"c48febefcc9258f783b207adcbe934f8260825b0b936d2d1d85c4de7fcea5a99"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.154685 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://c48febefcc9258f783b207adcbe934f8260825b0b936d2d1d85c4de7fcea5a99" gracePeriod=30 Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.208356 4932 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.265531 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.272931 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.311037 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.322533 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.353166 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.355292 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.358825 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-98ts2" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.376744 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.378867 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.383383 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.407494 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.493041 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.509520 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.518972 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.547207 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 11:13:34 crc 
kubenswrapper[4932]: I1125 11:13:34.558448 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.575497 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.688245 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.699591 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.724287 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.743951 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.751660 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-x9vzz" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.766526 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.775096 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-config-data" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.848101 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.918363 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.923295 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.933661 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.950527 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 11:13:34 crc kubenswrapper[4932]: I1125 11:13:34.970844 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.025763 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.030659 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.037671 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-hdn5j" event={"ID":"6f9b2a8f-5fe6-4c38-ab9b-ef9e30db8d93","Type":"ContainerStarted","Data":"d22d78399f461ea87bff17fc9f8cb32adcc3c9a7f2243b330c5270579889cd8a"} Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.045366 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 11:13:35 crc 
kubenswrapper[4932]: I1125 11:13:35.110529 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.110612 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.181263 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.238576 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.253211 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.270355 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-octavia-dockercfg-6fb4r" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.290100 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.332798 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.415940 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.450908 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.510815 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.512161 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.529974 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.543172 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.564058 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.567668 4932 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-skjhc" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.601082 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.606711 4932 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.607287 4932 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" 
containerName="startup-monitor" containerID="cri-o://8d9fa95132aa8985559983253b5d2d61094b2a502076f4a14c011df3a4ae9e86" gracePeriod=5 Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.640107 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.699682 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.722643 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.735623 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.746038 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.794967 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.802833 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.804986 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-hvdw4" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.812643 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.875704 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-skfts" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.913080 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.947657 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.950492 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 11:13:35 crc kubenswrapper[4932]: I1125 11:13:35.966690 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-gcfvd" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.028316 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.050660 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.138164 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.138229 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.143385 4932 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"cert-placement-internal-svc" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.163777 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.195946 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-msxsh" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.224057 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.224169 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.229676 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.233490 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-nx627" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.251542 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.338827 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.365241 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.382505 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.406015 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-5w7bk" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.424239 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.439874 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.459872 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.460781 4932 scope.go:117] "RemoveContainer" containerID="6be6ce6f4fadd03ca1931f5c7ec31d8f935ad210f55914574ed58a70d4290f33" Nov 25 11:13:36 crc kubenswrapper[4932]: E1125 11:13:36.461243 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-ps52v_openstack-operators(96d031ad-3550-4423-9422-93911c9a8217)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" podUID="96d031ad-3550-4423-9422-93911c9a8217" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.465960 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 
11:13:36.481882 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.482940 4932 scope.go:117] "RemoveContainer" containerID="9c220f165ee1f3c3ddc51a1485a6a95002a888bc55043315accf3f4d92519d3e" Nov 25 11:13:36 crc kubenswrapper[4932]: E1125 11:13:36.483421 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-t6t6s_openstack-operators(bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" podUID="bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.499784 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.501181 4932 scope.go:117] "RemoveContainer" containerID="46e9334f161851ff2c82016c6fe298052cbbe11761a6447845774536bf5f8cb0" Nov 25 11:13:36 crc kubenswrapper[4932]: E1125 11:13:36.502078 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-m2tpg_openstack-operators(a92ad4a6-d922-45c1-b02d-f382b1ea1cc0)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" podUID="a92ad4a6-d922-45c1-b02d-f382b1ea1cc0" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.503040 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.549499 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.557762 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.594743 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.595610 4932 scope.go:117] "RemoveContainer" containerID="5698c0f6c7d8a2894640c72e0feabb127c272ffed7be3ff4537b11a55984e5c7" Nov 25 11:13:36 crc kubenswrapper[4932]: E1125 11:13:36.596136 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-774b86978c-bk2nv_openstack-operators(d2216d92-9e2d-4549-b634-63ec3ada9f14)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" podUID="d2216d92-9e2d-4549-b634-63ec3ada9f14" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.620423 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.621157 4932 scope.go:117] "RemoveContainer" containerID="2fe6efa081a343abbc63f3989472265ad235422b11ff49cb092994e153842585" Nov 25 11:13:36 crc kubenswrapper[4932]: E1125 
11:13:36.622850 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-jg9pn_openstack-operators(65fb5603-367e-431f-a8d3-0a3281a70361)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" podUID="65fb5603-367e-431f-a8d3-0a3281a70361" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.625243 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.649058 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.662248 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.679292 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.700987 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.724776 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-internal-svc" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.771072 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.816666 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.816924 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.832294 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.833209 4932 scope.go:117] "RemoveContainer" containerID="594e0fdeba523919767ce551fe3eaa2f10957060a840a23fe357121030cfff70" Nov 25 11:13:36 crc kubenswrapper[4932]: E1125 11:13:36.833600 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-68b95954c9-x4l6r_openstack-operators(bde38973-f401-4917-8abc-08dafaf8f10c)\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" podUID="bde38973-f401-4917-8abc-08dafaf8f10c" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.864984 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.866070 4932 scope.go:117] "RemoveContainer" containerID="0b8fad67fbb4ad2b8197478c887396dd37068d0c413e4508f01b5df4df8b7ff8" Nov 25 11:13:36 crc kubenswrapper[4932]: E1125 11:13:36.866450 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s 
restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-b9l7b_openstack-operators(6dedf441-145d-4642-a0f0-fb691d2edd2d)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" podUID="6dedf441-145d-4642-a0f0-fb691d2edd2d" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.880782 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-9q5b9" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.895307 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.895589 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.896480 4932 scope.go:117] "RemoveContainer" containerID="6c84cbc7d3b16bab76c6590d21ef354cebbad8cfcb201a6c57bec2c6cf509ed2" Nov 25 11:13:36 crc kubenswrapper[4932]: E1125 11:13:36.896788 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-fjlpt_openstack-operators(12f70ae4-14e2-4eed-9c1d-29e380a6d757)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" podUID="12f70ae4-14e2-4eed-9c1d-29e380a6d757" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.897038 4932 scope.go:117] "RemoveContainer" containerID="95bc213b96c2e40877abef34c4b3f3c23263937e72406cef1e77d8741f61515c" Nov 25 11:13:36 crc kubenswrapper[4932]: E1125 11:13:36.897688 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rm8qr_openstack-operators(070a395c-8ac5-4303-80fb-7f93282a9f99)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" podUID="070a395c-8ac5-4303-80fb-7f93282a9f99" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.914769 4932 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-wttcj" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.927236 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-74crs" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.927476 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.928390 4932 scope.go:117] "RemoveContainer" containerID="321d4419e8cad686272e3fceacb1607751d371e203ccd3a4a5675f0a1fc8cf13" Nov 25 11:13:36 crc kubenswrapper[4932]: E1125 11:13:36.928701 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-phkzd_openstack-operators(6fcca084-72cb-48ba-948f-6c4d861f6096)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" podUID="6fcca084-72cb-48ba-948f-6c4d861f6096" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.952707 4932 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.953425 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.953673 4932 scope.go:117] "RemoveContainer" containerID="ca1b590d0f66ab283d53143a55c1d064fd2eec81f8fc8a59c3fabeba9129fddb" Nov 25 11:13:36 crc kubenswrapper[4932]: E1125 11:13:36.954008 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-q7rt6_openstack-operators(dae34761-581e-4f65-8d7c-d6c2d302b4f7)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" podUID="dae34761-581e-4f65-8d7c-d6c2d302b4f7" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.954485 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.962853 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" Nov 25 11:13:36 crc kubenswrapper[4932]: I1125 11:13:36.963940 4932 scope.go:117] "RemoveContainer" containerID="47cd7872ffbfdde95460a0502578f2e60883297afc055bd2b7abdbbc78d9e15e" Nov 25 11:13:36 crc kubenswrapper[4932]: E1125 11:13:36.964409 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-58bb8d67cc-bbmvf_openstack-operators(e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d)\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" podUID="e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.010914 4932 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.011547 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-fp9vn" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.022987 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.038703 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.039705 4932 scope.go:117] "RemoveContainer" containerID="7197d1a1f02b016dc74d7997a6b26a25b0f9c10460d84e4ac28ae10ebfb271c4" Nov 25 11:13:37 crc kubenswrapper[4932]: E1125 11:13:37.040022 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-66cf5c67ff-rcv5q_openstack-operators(af96b4c7-e9eb-4609-afab-ba3cc15f0a48)\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" podUID="af96b4c7-e9eb-4609-afab-ba3cc15f0a48" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.119440 4932 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.120409 4932 scope.go:117] "RemoveContainer" containerID="4c8fd51ffda023a4309cbf1429435b6c532329159a36feebbcd8898e09f5e26c" Nov 25 11:13:37 crc kubenswrapper[4932]: E1125 11:13:37.120723 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-4wph8_openstack-operators(8c014265-53e2-4c4d-9c25-452686712f2e)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" podUID="8c014265-53e2-4c4d-9c25-452686712f2e" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.137630 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.152503 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.229005 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.232586 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.233723 4932 scope.go:117] "RemoveContainer" containerID="f146425e746d7c86d256614de1eb772b459a6ba29f81bc051e76f5ff8aab9e6d" Nov 25 11:13:37 crc kubenswrapper[4932]: E1125 11:13:37.234019 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-7c57c8bbc4-cwqvg_openstack-operators(243ff257-9836-4e43-9228-e05f18282650)\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" podUID="243ff257-9836-4e43-9228-e05f18282650" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.246118 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.247466 4932 scope.go:117] "RemoveContainer" containerID="753ce2405a7150ab8cf76e7d80a19d6725906738039472a86f69449bcceabb6d" Nov 25 11:13:37 crc kubenswrapper[4932]: E1125 11:13:37.247969 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-d5cc86f4b-5bkct_openstack-operators(765f296f-cd42-4f2c-9b21-2bcbc65d490c)\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" podUID="765f296f-cd42-4f2c-9b21-2bcbc65d490c" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.274645 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.426783 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.462129 4932 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"placement-scripts" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.464619 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.465512 4932 scope.go:117] "RemoveContainer" containerID="4565b24aaac5b06b87d85cb6970b4ed78a8f21a3363de55c7f178311e200c9a3" Nov 25 11:13:37 crc kubenswrapper[4932]: E1125 11:13:37.465801 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-tkjb4_openstack-operators(d4860edf-9f45-4dd2-8e35-7c3a4444370a)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" podUID="d4860edf-9f45-4dd2-8e35-7c3a4444370a" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.491732 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.492585 4932 scope.go:117] "RemoveContainer" containerID="5e49d66515e26860a98b9b2132fd198f0f788a7552146e7ddb3fe047848e46c0" Nov 25 11:13:37 crc kubenswrapper[4932]: E1125 11:13:37.492835 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-567f98c9d-blm28_openstack-operators(695ce8a3-6a30-42a4-8ba2-f6309470362c)\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" podUID="695ce8a3-6a30-42a4-8ba2-f6309470362c" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.649220 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.650207 4932 scope.go:117] "RemoveContainer" containerID="6e3f0bcabd684bef0f057b442d25c0f22b782e20ed02489a6197bad31126bb0a" Nov 25 11:13:37 crc kubenswrapper[4932]: E1125 11:13:37.650544 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=watcher-operator-controller-manager-864885998-pkjjd_openstack-operators(45ebb480-733b-47a3-a682-8fe0be16eb78)\"" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" podUID="45ebb480-733b-47a3-a682-8fe0be16eb78" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.681215 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-ovndbs" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.735855 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-config-data" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.756806 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-cvwkj" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.780119 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.793924 4932 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.850648 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.889324 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.945782 4932 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.961928 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 11:13:37 crc kubenswrapper[4932]: I1125 11:13:37.972122 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.019065 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.055824 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.074227 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.152035 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.170179 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.191700 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-tqwz9" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.362256 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.399662 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.514830 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.528591 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.583006 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.622516 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.632095 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.705763 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 25 
11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.713480 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.726541 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.737259 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.752025 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-nxgm7" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.771014 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.855589 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.856098 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.862318 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.896827 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-pbm5f" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.903397 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-q5brb" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.922598 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.954420 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 11:13:38 crc kubenswrapper[4932]: I1125 11:13:38.999730 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 11:13:39 crc kubenswrapper[4932]: I1125 11:13:39.040737 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 11:13:39 crc kubenswrapper[4932]: I1125 11:13:39.043672 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 11:13:39 crc kubenswrapper[4932]: I1125 11:13:39.106679 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 11:13:39 crc kubenswrapper[4932]: I1125 11:13:39.132007 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 11:13:39 crc kubenswrapper[4932]: I1125 11:13:39.137184 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 11:13:39 crc kubenswrapper[4932]: I1125 11:13:39.310398 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-vdfwn" Nov 25 11:13:39 crc kubenswrapper[4932]: I1125 
11:13:39.557790 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 25 11:13:39 crc kubenswrapper[4932]: I1125 11:13:39.582512 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 11:13:39 crc kubenswrapper[4932]: I1125 11:13:39.607055 4932 scope.go:117] "RemoveContainer" containerID="78202d006338b185ad300800279cc203ec2b3a603a818b3c1667f12a4c7afe7a" Nov 25 11:13:39 crc kubenswrapper[4932]: I1125 11:13:39.624512 4932 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 25 11:13:39 crc kubenswrapper[4932]: I1125 11:13:39.855138 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 11:13:40 crc kubenswrapper[4932]: I1125 11:13:40.088586 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" event={"ID":"b7db3ea2-66e6-46f2-93b4-4c8405a1b566","Type":"ContainerStarted","Data":"7bd69aab6afbc7d9415594d350090a54b4e936327c4e36f5ac062ac07d061a17"} Nov 25 11:13:40 crc kubenswrapper[4932]: I1125 11:13:40.088830 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-76dcd9496-2bqxl" Nov 25 11:13:40 crc kubenswrapper[4932]: I1125 11:13:40.441171 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 11:13:40 crc kubenswrapper[4932]: I1125 11:13:40.503157 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 11:13:40 crc kubenswrapper[4932]: I1125 11:13:40.634456 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.100383 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.100680 4932 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="8d9fa95132aa8985559983253b5d2d61094b2a502076f4a14c011df3a4ae9e86" exitCode=137 Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.264242 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7cd5954d9-bdswv" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.268319 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.268376 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.350485 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.414466 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.415072 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.414615 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.415222 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.415434 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.415516 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.415635 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.415798 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.415915 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.417389 4932 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.417562 4932 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.417648 4932 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.417723 4932 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.423737 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 11:13:41 crc kubenswrapper[4932]: I1125 11:13:41.520034 4932 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 11:13:42 crc kubenswrapper[4932]: I1125 11:13:42.111472 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 25 11:13:42 crc kubenswrapper[4932]: I1125 11:13:42.111907 4932 scope.go:117] "RemoveContainer" containerID="8d9fa95132aa8985559983253b5d2d61094b2a502076f4a14c011df3a4ae9e86" Nov 25 11:13:42 crc kubenswrapper[4932]: I1125 11:13:42.112013 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 11:13:42 crc kubenswrapper[4932]: I1125 11:13:42.606764 4932 scope.go:117] "RemoveContainer" containerID="17897f06fab4c172257a616b48b13b26ffaa2180c75f533e1a8b11f1804ed610" Nov 25 11:13:42 crc kubenswrapper[4932]: I1125 11:13:42.619961 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 25 11:13:44 crc kubenswrapper[4932]: I1125 11:13:44.134317 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"92b56b05-8133-4f01-a855-7bb7f523b38c","Type":"ContainerStarted","Data":"ec21866e5648a63ed27e3c2692bba87a651ee711fc8effcb12c314aa6797aa5a"} Nov 25 11:13:44 crc kubenswrapper[4932]: I1125 11:13:44.134830 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 11:13:44 crc kubenswrapper[4932]: I1125 11:13:44.606251 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:13:44 crc kubenswrapper[4932]: E1125 11:13:44.607001 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.460089 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.461424 4932 scope.go:117] "RemoveContainer" containerID="6be6ce6f4fadd03ca1931f5c7ec31d8f935ad210f55914574ed58a70d4290f33" Nov 25 11:13:46 crc kubenswrapper[4932]: E1125 11:13:46.461747 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=barbican-operator-controller-manager-86dc4d89c8-ps52v_openstack-operators(96d031ad-3550-4423-9422-93911c9a8217)\"" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" podUID="96d031ad-3550-4423-9422-93911c9a8217" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.481334 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.481788 4932 scope.go:117] "RemoveContainer" containerID="9c220f165ee1f3c3ddc51a1485a6a95002a888bc55043315accf3f4d92519d3e" Nov 25 11:13:46 crc kubenswrapper[4932]: E1125 11:13:46.482087 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=cinder-operator-controller-manager-79856dc55c-t6t6s_openstack-operators(bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd)\"" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" podUID="bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.499621 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.500450 4932 scope.go:117] "RemoveContainer" containerID="46e9334f161851ff2c82016c6fe298052cbbe11761a6447845774536bf5f8cb0" Nov 25 11:13:46 crc kubenswrapper[4932]: E1125 11:13:46.500721 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=designate-operator-controller-manager-7d695c9b56-m2tpg_openstack-operators(a92ad4a6-d922-45c1-b02d-f382b1ea1cc0)\"" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" podUID="a92ad4a6-d922-45c1-b02d-f382b1ea1cc0" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.593962 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.594819 4932 scope.go:117] "RemoveContainer" containerID="5698c0f6c7d8a2894640c72e0feabb127c272ffed7be3ff4537b11a55984e5c7" Nov 25 11:13:46 crc kubenswrapper[4932]: E1125 11:13:46.595163 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=heat-operator-controller-manager-774b86978c-bk2nv_openstack-operators(d2216d92-9e2d-4549-b634-63ec3ada9f14)\"" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" podUID="d2216d92-9e2d-4549-b634-63ec3ada9f14" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.619367 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.620257 4932 scope.go:117] "RemoveContainer" containerID="2fe6efa081a343abbc63f3989472265ad235422b11ff49cb092994e153842585" Nov 25 11:13:46 crc kubenswrapper[4932]: E1125 11:13:46.620564 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=horizon-operator-controller-manager-68c9694994-jg9pn_openstack-operators(65fb5603-367e-431f-a8d3-0a3281a70361)\"" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" podUID="65fb5603-367e-431f-a8d3-0a3281a70361" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.832770 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.834362 4932 scope.go:117] "RemoveContainer" containerID="594e0fdeba523919767ce551fe3eaa2f10957060a840a23fe357121030cfff70" Nov 25 11:13:46 crc kubenswrapper[4932]: E1125 11:13:46.834696 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=glance-operator-controller-manager-68b95954c9-x4l6r_openstack-operators(bde38973-f401-4917-8abc-08dafaf8f10c)\"" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" podUID="bde38973-f401-4917-8abc-08dafaf8f10c" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.865762 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.866653 4932 scope.go:117] "RemoveContainer" containerID="0b8fad67fbb4ad2b8197478c887396dd37068d0c413e4508f01b5df4df8b7ff8" Nov 25 11:13:46 crc kubenswrapper[4932]: E1125 11:13:46.866922 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-5bfcdc958c-b9l7b_openstack-operators(6dedf441-145d-4642-a0f0-fb691d2edd2d)\"" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" podUID="6dedf441-145d-4642-a0f0-fb691d2edd2d" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.894836 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.895722 4932 scope.go:117] "RemoveContainer" containerID="95bc213b96c2e40877abef34c4b3f3c23263937e72406cef1e77d8741f61515c" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.895980 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" Nov 25 11:13:46 crc kubenswrapper[4932]: E1125 11:13:46.895984 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-rm8qr_openstack-operators(070a395c-8ac5-4303-80fb-7f93282a9f99)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" podUID="070a395c-8ac5-4303-80fb-7f93282a9f99" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.896960 4932 scope.go:117] "RemoveContainer" containerID="6c84cbc7d3b16bab76c6590d21ef354cebbad8cfcb201a6c57bec2c6cf509ed2" Nov 25 11:13:46 crc kubenswrapper[4932]: E1125 11:13:46.897496 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=keystone-operator-controller-manager-748dc6576f-fjlpt_openstack-operators(12f70ae4-14e2-4eed-9c1d-29e380a6d757)\"" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" podUID="12f70ae4-14e2-4eed-9c1d-29e380a6d757" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.927468 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.928376 4932 scope.go:117] "RemoveContainer" containerID="321d4419e8cad686272e3fceacb1607751d371e203ccd3a4a5675f0a1fc8cf13" Nov 25 11:13:46 crc kubenswrapper[4932]: E1125 11:13:46.928700 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=mariadb-operator-controller-manager-cb6c4fdb7-phkzd_openstack-operators(6fcca084-72cb-48ba-948f-6c4d861f6096)\"" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" podUID="6fcca084-72cb-48ba-948f-6c4d861f6096" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.959250 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.960185 4932 scope.go:117] "RemoveContainer" containerID="ca1b590d0f66ab283d53143a55c1d064fd2eec81f8fc8a59c3fabeba9129fddb" Nov 25 11:13:46 crc kubenswrapper[4932]: E1125 11:13:46.960560 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-fd75fd47d-q7rt6_openstack-operators(dae34761-581e-4f65-8d7c-d6c2d302b4f7)\"" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" podUID="dae34761-581e-4f65-8d7c-d6c2d302b4f7" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.962876 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" Nov 25 11:13:46 crc kubenswrapper[4932]: I1125 11:13:46.963701 4932 scope.go:117] "RemoveContainer" containerID="47cd7872ffbfdde95460a0502578f2e60883297afc055bd2b7abdbbc78d9e15e" Nov 25 11:13:46 crc kubenswrapper[4932]: E1125 11:13:46.963984 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=manila-operator-controller-manager-58bb8d67cc-bbmvf_openstack-operators(e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d)\"" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" podUID="e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.038408 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.039346 4932 scope.go:117] "RemoveContainer" containerID="7197d1a1f02b016dc74d7997a6b26a25b0f9c10460d84e4ac28ae10ebfb271c4" Nov 25 11:13:47 crc kubenswrapper[4932]: E1125 11:13:47.039649 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ovn-operator-controller-manager-66cf5c67ff-rcv5q_openstack-operators(af96b4c7-e9eb-4609-afab-ba3cc15f0a48)\"" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" podUID="af96b4c7-e9eb-4609-afab-ba3cc15f0a48" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.118985 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.120309 4932 scope.go:117] "RemoveContainer" containerID="4c8fd51ffda023a4309cbf1429435b6c532329159a36feebbcd8898e09f5e26c" Nov 25 11:13:47 crc kubenswrapper[4932]: E1125 11:13:47.120722 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=placement-operator-controller-manager-5db546f9d9-4wph8_openstack-operators(8c014265-53e2-4c4d-9c25-452686712f2e)\"" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" podUID="8c014265-53e2-4c4d-9c25-452686712f2e" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.232455 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.233473 4932 scope.go:117] "RemoveContainer" containerID="f146425e746d7c86d256614de1eb772b459a6ba29f81bc051e76f5ff8aab9e6d" Nov 25 11:13:47 crc kubenswrapper[4932]: E1125 11:13:47.233782 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=neutron-operator-controller-manager-7c57c8bbc4-cwqvg_openstack-operators(243ff257-9836-4e43-9228-e05f18282650)\"" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" podUID="243ff257-9836-4e43-9228-e05f18282650" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.245340 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.246305 4932 scope.go:117] "RemoveContainer" containerID="753ce2405a7150ab8cf76e7d80a19d6725906738039472a86f69449bcceabb6d" Nov 25 11:13:47 crc kubenswrapper[4932]: E1125 11:13:47.246594 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-d5cc86f4b-5bkct_openstack-operators(765f296f-cd42-4f2c-9b21-2bcbc65d490c)\"" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" podUID="765f296f-cd42-4f2c-9b21-2bcbc65d490c" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.465252 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.466004 4932 scope.go:117] "RemoveContainer" containerID="4565b24aaac5b06b87d85cb6970b4ed78a8f21a3363de55c7f178311e200c9a3" Nov 25 11:13:47 crc kubenswrapper[4932]: E1125 11:13:47.466314 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=swift-operator-controller-manager-6fdc4fcf86-tkjb4_openstack-operators(d4860edf-9f45-4dd2-8e35-7c3a4444370a)\"" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" podUID="d4860edf-9f45-4dd2-8e35-7c3a4444370a" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.491438 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.492357 4932 scope.go:117] "RemoveContainer" containerID="5e49d66515e26860a98b9b2132fd198f0f788a7552146e7ddb3fe047848e46c0" Nov 25 11:13:47 crc kubenswrapper[4932]: E1125 11:13:47.492795 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=telemetry-operator-controller-manager-567f98c9d-blm28_openstack-operators(695ce8a3-6a30-42a4-8ba2-f6309470362c)\"" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" podUID="695ce8a3-6a30-42a4-8ba2-f6309470362c" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.649523 4932 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" Nov 25 11:13:47 crc kubenswrapper[4932]: I1125 11:13:47.650523 4932 scope.go:117] "RemoveContainer" containerID="6e3f0bcabd684bef0f057b442d25c0f22b782e20ed02489a6197bad31126bb0a" Nov 25 11:13:48 crc kubenswrapper[4932]: I1125 11:13:48.174922 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" event={"ID":"45ebb480-733b-47a3-a682-8fe0be16eb78","Type":"ContainerStarted","Data":"26f84dea38e3b2170758d3073afe13c7654e9abbca4af019964ac726e44b32ea"} Nov 25 11:13:48 crc kubenswrapper[4932]: I1125 11:13:48.175807 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" Nov 25 11:13:48 crc kubenswrapper[4932]: I1125 11:13:48.466933 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 11:13:56 crc kubenswrapper[4932]: I1125 11:13:56.605976 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:13:56 crc kubenswrapper[4932]: E1125 11:13:56.606921 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:13:57 crc kubenswrapper[4932]: I1125 11:13:57.606837 4932 scope.go:117] "RemoveContainer" containerID="9c220f165ee1f3c3ddc51a1485a6a95002a888bc55043315accf3f4d92519d3e" Nov 25 11:13:57 crc kubenswrapper[4932]: I1125 11:13:57.606922 4932 scope.go:117] "RemoveContainer" containerID="321d4419e8cad686272e3fceacb1607751d371e203ccd3a4a5675f0a1fc8cf13" Nov 25 11:13:57 crc kubenswrapper[4932]: I1125 11:13:57.607383 4932 scope.go:117] "RemoveContainer" containerID="6be6ce6f4fadd03ca1931f5c7ec31d8f935ad210f55914574ed58a70d4290f33" Nov 25 11:13:57 crc kubenswrapper[4932]: I1125 11:13:57.652534 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-864885998-pkjjd" Nov 25 11:13:58 crc kubenswrapper[4932]: I1125 11:13:58.297710 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" event={"ID":"bd60c8a4-1de7-45b5-9ae6-d0bd9604cffd","Type":"ContainerStarted","Data":"049d5286648115e45aee3bf994ac5d259349fba6437fb98cf236d7f569f30bd7"} Nov 25 11:13:58 crc kubenswrapper[4932]: I1125 11:13:58.298363 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 11:13:58 crc kubenswrapper[4932]: I1125 11:13:58.299890 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" event={"ID":"6fcca084-72cb-48ba-948f-6c4d861f6096","Type":"ContainerStarted","Data":"eb967abb6c0df70e53088df7c99df9e8cee39211e40c1d3bd16351fac3fa1072"} Nov 25 11:13:58 crc kubenswrapper[4932]: I1125 11:13:58.300456 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" Nov 25 11:13:58 crc 
Nov 25 11:13:58 crc kubenswrapper[4932]: I1125 11:13:58.336353 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v"
Nov 25 11:13:58 crc kubenswrapper[4932]: I1125 11:13:58.606557 4932 scope.go:117] "RemoveContainer" containerID="46e9334f161851ff2c82016c6fe298052cbbe11761a6447845774536bf5f8cb0"
Nov 25 11:13:58 crc kubenswrapper[4932]: I1125 11:13:58.606738 4932 scope.go:117] "RemoveContainer" containerID="4565b24aaac5b06b87d85cb6970b4ed78a8f21a3363de55c7f178311e200c9a3"
Nov 25 11:13:59 crc kubenswrapper[4932]: I1125 11:13:59.348949 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" event={"ID":"d4860edf-9f45-4dd2-8e35-7c3a4444370a","Type":"ContainerStarted","Data":"cdacc64826096e2d3ba12636764ac0cf2bdb3eae9bee00b0f5471263427162f7"}
Nov 25 11:13:59 crc kubenswrapper[4932]: I1125 11:13:59.349476 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4"
Nov 25 11:13:59 crc kubenswrapper[4932]: I1125 11:13:59.351537 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" event={"ID":"a92ad4a6-d922-45c1-b02d-f382b1ea1cc0","Type":"ContainerStarted","Data":"3118e2b5facf41a449fc817e9166defbf98408821cb0670dadb0a821d732280d"}
Nov 25 11:13:59 crc kubenswrapper[4932]: I1125 11:13:59.606677 4932 scope.go:117] "RemoveContainer" containerID="ca1b590d0f66ab283d53143a55c1d064fd2eec81f8fc8a59c3fabeba9129fddb"
Nov 25 11:13:59 crc kubenswrapper[4932]: I1125 11:13:59.606801 4932 scope.go:117] "RemoveContainer" containerID="2fe6efa081a343abbc63f3989472265ad235422b11ff49cb092994e153842585"
Nov 25 11:13:59 crc kubenswrapper[4932]: I1125 11:13:59.606998 4932 scope.go:117] "RemoveContainer" containerID="753ce2405a7150ab8cf76e7d80a19d6725906738039472a86f69449bcceabb6d"
Nov 25 11:13:59 crc kubenswrapper[4932]: I1125 11:13:59.607346 4932 scope.go:117] "RemoveContainer" containerID="f146425e746d7c86d256614de1eb772b459a6ba29f81bc051e76f5ff8aab9e6d"
Nov 25 11:14:00 crc kubenswrapper[4932]: I1125 11:14:00.363448 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" event={"ID":"243ff257-9836-4e43-9228-e05f18282650","Type":"ContainerStarted","Data":"85d8a035dea6f188a38e418dddb4efc8a0303a2f4e45c0aed3f3c4ba469810ad"}
Nov 25 11:14:00 crc kubenswrapper[4932]: I1125 11:14:00.364040 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg"
Nov 25 11:14:00 crc kubenswrapper[4932]: I1125 11:14:00.366931 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" event={"ID":"dae34761-581e-4f65-8d7c-d6c2d302b4f7","Type":"ContainerStarted","Data":"861e69b703b88ddb768c4859dab040d730dcefcdd2a12c249edaa7d59f94aafe"}
Nov 25 11:14:00 crc kubenswrapper[4932]: I1125 11:14:00.367241 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6"
status="" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" Nov 25 11:14:00 crc kubenswrapper[4932]: I1125 11:14:00.369717 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" event={"ID":"65fb5603-367e-431f-a8d3-0a3281a70361","Type":"ContainerStarted","Data":"bdc63e8e65a4724a3beb3a74da28520af4f06f98b7cafe92042faa1427ebfac4"} Nov 25 11:14:00 crc kubenswrapper[4932]: I1125 11:14:00.370507 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 11:14:00 crc kubenswrapper[4932]: I1125 11:14:00.373173 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" event={"ID":"765f296f-cd42-4f2c-9b21-2bcbc65d490c","Type":"ContainerStarted","Data":"e15c8b07c7aaa26b1a34f101c561b7030cd1dd73b63641a116c5d8d7484983c8"} Nov 25 11:14:00 crc kubenswrapper[4932]: I1125 11:14:00.373469 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 11:14:00 crc kubenswrapper[4932]: I1125 11:14:00.616249 4932 scope.go:117] "RemoveContainer" containerID="4c8fd51ffda023a4309cbf1429435b6c532329159a36feebbcd8898e09f5e26c" Nov 25 11:14:00 crc kubenswrapper[4932]: I1125 11:14:00.616853 4932 scope.go:117] "RemoveContainer" containerID="7197d1a1f02b016dc74d7997a6b26a25b0f9c10460d84e4ac28ae10ebfb271c4" Nov 25 11:14:00 crc kubenswrapper[4932]: I1125 11:14:00.618118 4932 scope.go:117] "RemoveContainer" containerID="5e49d66515e26860a98b9b2132fd198f0f788a7552146e7ddb3fe047848e46c0" Nov 25 11:14:01 crc kubenswrapper[4932]: I1125 11:14:01.386022 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" event={"ID":"8c014265-53e2-4c4d-9c25-452686712f2e","Type":"ContainerStarted","Data":"600e71a44a5050b30f2d9dd8da7c33e48a473a80b090da3496dadee96ce2c3c0"} Nov 25 11:14:01 crc kubenswrapper[4932]: I1125 11:14:01.386608 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" Nov 25 11:14:01 crc kubenswrapper[4932]: I1125 11:14:01.388605 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" event={"ID":"af96b4c7-e9eb-4609-afab-ba3cc15f0a48","Type":"ContainerStarted","Data":"002c521bba695aee72e44a5e66f0c87029d41ff60b5c8d27e9d18dac48748613"} Nov 25 11:14:01 crc kubenswrapper[4932]: I1125 11:14:01.389202 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" Nov 25 11:14:01 crc kubenswrapper[4932]: I1125 11:14:01.391375 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" event={"ID":"695ce8a3-6a30-42a4-8ba2-f6309470362c","Type":"ContainerStarted","Data":"33821819ab328d9b2fc8f425bff123a48ffa5437022920c62140aab9a1acbbbe"} Nov 25 11:14:01 crc kubenswrapper[4932]: I1125 11:14:01.606475 4932 scope.go:117] "RemoveContainer" containerID="594e0fdeba523919767ce551fe3eaa2f10957060a840a23fe357121030cfff70" Nov 25 11:14:01 crc kubenswrapper[4932]: I1125 11:14:01.607029 4932 scope.go:117] "RemoveContainer" 
containerID="5698c0f6c7d8a2894640c72e0feabb127c272ffed7be3ff4537b11a55984e5c7" Nov 25 11:14:01 crc kubenswrapper[4932]: I1125 11:14:01.608117 4932 scope.go:117] "RemoveContainer" containerID="6c84cbc7d3b16bab76c6590d21ef354cebbad8cfcb201a6c57bec2c6cf509ed2" Nov 25 11:14:01 crc kubenswrapper[4932]: I1125 11:14:01.608475 4932 scope.go:117] "RemoveContainer" containerID="0b8fad67fbb4ad2b8197478c887396dd37068d0c413e4508f01b5df4df8b7ff8" Nov 25 11:14:02 crc kubenswrapper[4932]: I1125 11:14:02.402846 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" event={"ID":"d2216d92-9e2d-4549-b634-63ec3ada9f14","Type":"ContainerStarted","Data":"8bcea0d4435a13b317ab4a10e982c040fbf59e8f1bbeff31dca5c25257432b5a"} Nov 25 11:14:02 crc kubenswrapper[4932]: I1125 11:14:02.403644 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv" Nov 25 11:14:02 crc kubenswrapper[4932]: I1125 11:14:02.404825 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" event={"ID":"6dedf441-145d-4642-a0f0-fb691d2edd2d","Type":"ContainerStarted","Data":"4d7f05c485f4309f3354a260b82b934bb0c739a472650317c3f6100086ee873f"} Nov 25 11:14:02 crc kubenswrapper[4932]: I1125 11:14:02.405422 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b" Nov 25 11:14:02 crc kubenswrapper[4932]: I1125 11:14:02.408069 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" event={"ID":"12f70ae4-14e2-4eed-9c1d-29e380a6d757","Type":"ContainerStarted","Data":"f71856df8446771140b9985df433e2220e122f9f4aefb6f3d39d253d8ee6440e"} Nov 25 11:14:02 crc kubenswrapper[4932]: I1125 11:14:02.409088 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt" Nov 25 11:14:02 crc kubenswrapper[4932]: I1125 11:14:02.413217 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" event={"ID":"bde38973-f401-4917-8abc-08dafaf8f10c","Type":"ContainerStarted","Data":"bfccdaaf8f2432d4073231e6d920df57fb19de6bc9e89db8dcef11cf601a0b26"} Nov 25 11:14:02 crc kubenswrapper[4932]: I1125 11:14:02.414085 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r" Nov 25 11:14:02 crc kubenswrapper[4932]: I1125 11:14:02.607143 4932 scope.go:117] "RemoveContainer" containerID="95bc213b96c2e40877abef34c4b3f3c23263937e72406cef1e77d8741f61515c" Nov 25 11:14:02 crc kubenswrapper[4932]: I1125 11:14:02.607893 4932 scope.go:117] "RemoveContainer" containerID="47cd7872ffbfdde95460a0502578f2e60883297afc055bd2b7abdbbc78d9e15e" Nov 25 11:14:03 crc kubenswrapper[4932]: I1125 11:14:03.429573 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" event={"ID":"e5a4b4dd-4498-4ab5-9ca1-1ac3ab836e5d","Type":"ContainerStarted","Data":"dc686a0345e4b0e0347efdf915a06932f1af28fcfd4e04ffbb93f9e54c44b504"} Nov 25 11:14:03 crc kubenswrapper[4932]: I1125 11:14:03.430136 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf" Nov 25 11:14:03 crc kubenswrapper[4932]: I1125 11:14:03.435731 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" event={"ID":"070a395c-8ac5-4303-80fb-7f93282a9f99","Type":"ContainerStarted","Data":"791193bf750256dee67e0ff2f7a8fe19a372cc1dabdd58566a9c6d0b93eeea77"} Nov 25 11:14:04 crc kubenswrapper[4932]: I1125 11:14:04.448278 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 25 11:14:04 crc kubenswrapper[4932]: I1125 11:14:04.451094 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 11:14:04 crc kubenswrapper[4932]: I1125 11:14:04.451141 4932 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="c48febefcc9258f783b207adcbe934f8260825b0b936d2d1d85c4de7fcea5a99" exitCode=137 Nov 25 11:14:04 crc kubenswrapper[4932]: I1125 11:14:04.451510 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"c48febefcc9258f783b207adcbe934f8260825b0b936d2d1d85c4de7fcea5a99"} Nov 25 11:14:04 crc kubenswrapper[4932]: I1125 11:14:04.451789 4932 scope.go:117] "RemoveContainer" containerID="ac40341dab6694e0ef531df8c345f25c753d730db143b2a29816379814321b79" Nov 25 11:14:04 crc kubenswrapper[4932]: I1125 11:14:04.509459 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 11:14:05 crc kubenswrapper[4932]: I1125 11:14:05.467031 4932 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 25 11:14:05 crc kubenswrapper[4932]: I1125 11:14:05.468871 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"99f33cc3ccfd8ed33d057a3b9eeff3e2599472c6f0b149adb648b424a450e44a"} Nov 25 11:14:06 crc kubenswrapper[4932]: I1125 11:14:06.462454 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-86dc4d89c8-ps52v" Nov 25 11:14:06 crc kubenswrapper[4932]: I1125 11:14:06.485640 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-79856dc55c-t6t6s" Nov 25 11:14:06 crc kubenswrapper[4932]: I1125 11:14:06.500452 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 11:14:06 crc kubenswrapper[4932]: I1125 11:14:06.502589 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-7d695c9b56-m2tpg" Nov 25 11:14:06 crc kubenswrapper[4932]: I1125 11:14:06.622340 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/horizon-operator-controller-manager-68c9694994-jg9pn" Nov 25 11:14:06 crc kubenswrapper[4932]: I1125 11:14:06.895555 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr" Nov 25 11:14:06 crc kubenswrapper[4932]: I1125 11:14:06.929737 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-cb6c4fdb7-phkzd" Nov 25 11:14:06 crc kubenswrapper[4932]: I1125 11:14:06.962701 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-fd75fd47d-q7rt6" Nov 25 11:14:07 crc kubenswrapper[4932]: I1125 11:14:07.039274 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-66cf5c67ff-rcv5q" Nov 25 11:14:07 crc kubenswrapper[4932]: I1125 11:14:07.121243 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5db546f9d9-4wph8" Nov 25 11:14:07 crc kubenswrapper[4932]: I1125 11:14:07.245989 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-7c57c8bbc4-cwqvg" Nov 25 11:14:07 crc kubenswrapper[4932]: I1125 11:14:07.252629 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-d5cc86f4b-5bkct" Nov 25 11:14:07 crc kubenswrapper[4932]: I1125 11:14:07.466949 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-6fdc4fcf86-tkjb4" Nov 25 11:14:07 crc kubenswrapper[4932]: I1125 11:14:07.491964 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" Nov 25 11:14:07 crc kubenswrapper[4932]: I1125 11:14:07.493902 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-567f98c9d-blm28" Nov 25 11:14:09 crc kubenswrapper[4932]: I1125 11:14:09.181704 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 11:14:11 crc kubenswrapper[4932]: I1125 11:14:11.605861 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:14:11 crc kubenswrapper[4932]: E1125 11:14:11.606532 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:14:14 crc kubenswrapper[4932]: I1125 11:14:14.154547 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 11:14:14 crc kubenswrapper[4932]: I1125 11:14:14.157844 4932 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 11:14:14 crc kubenswrapper[4932]: I1125 
Nov 25 11:14:16 crc kubenswrapper[4932]: I1125 11:14:16.602045 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-774b86978c-bk2nv"
Nov 25 11:14:16 crc kubenswrapper[4932]: I1125 11:14:16.837163 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-68b95954c9-x4l6r"
Nov 25 11:14:16 crc kubenswrapper[4932]: I1125 11:14:16.867378 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5bfcdc958c-b9l7b"
Nov 25 11:14:16 crc kubenswrapper[4932]: I1125 11:14:16.898048 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-748dc6576f-fjlpt"
Nov 25 11:14:16 crc kubenswrapper[4932]: I1125 11:14:16.900023 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-rm8qr"
Nov 25 11:14:16 crc kubenswrapper[4932]: I1125 11:14:16.965244 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58bb8d67cc-bbmvf"
Nov 25 11:14:17 crc kubenswrapper[4932]: I1125 11:14:17.621945 4932 generic.go:334] "Generic (PLEG): container finished" podID="4530a78c-b3ce-425c-bad3-c8821d4de544" containerID="9e66d5eb73ffc7acf7cc0559fa6c3791d7a9071f7b17d00b8a6c0b13a5e9aa38" exitCode=0
Nov 25 11:14:17 crc kubenswrapper[4932]: I1125 11:14:17.622022 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-6wtk6" event={"ID":"4530a78c-b3ce-425c-bad3-c8821d4de544","Type":"ContainerDied","Data":"9e66d5eb73ffc7acf7cc0559fa6c3791d7a9071f7b17d00b8a6c0b13a5e9aa38"}
Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.183489 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-6wtk6"
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-6wtk6" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.186872 4932 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.245774 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-2\") pod \"4530a78c-b3ce-425c-bad3-c8821d4de544\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.245894 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ssh-key\") pod \"4530a78c-b3ce-425c-bad3-c8821d4de544\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.245929 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqwl8\" (UniqueName: \"kubernetes.io/projected/4530a78c-b3ce-425c-bad3-c8821d4de544-kube-api-access-hqwl8\") pod \"4530a78c-b3ce-425c-bad3-c8821d4de544\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.246001 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-1\") pod \"4530a78c-b3ce-425c-bad3-c8821d4de544\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.246040 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-inventory\") pod \"4530a78c-b3ce-425c-bad3-c8821d4de544\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.246237 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-telemetry-combined-ca-bundle\") pod \"4530a78c-b3ce-425c-bad3-c8821d4de544\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.246283 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-0\") pod \"4530a78c-b3ce-425c-bad3-c8821d4de544\" (UID: \"4530a78c-b3ce-425c-bad3-c8821d4de544\") " Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.254594 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "4530a78c-b3ce-425c-bad3-c8821d4de544" (UID: "4530a78c-b3ce-425c-bad3-c8821d4de544"). InnerVolumeSpecName "telemetry-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.259003 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4530a78c-b3ce-425c-bad3-c8821d4de544-kube-api-access-hqwl8" (OuterVolumeSpecName: "kube-api-access-hqwl8") pod "4530a78c-b3ce-425c-bad3-c8821d4de544" (UID: "4530a78c-b3ce-425c-bad3-c8821d4de544"). InnerVolumeSpecName "kube-api-access-hqwl8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.288072 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "4530a78c-b3ce-425c-bad3-c8821d4de544" (UID: "4530a78c-b3ce-425c-bad3-c8821d4de544"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.296735 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "4530a78c-b3ce-425c-bad3-c8821d4de544" (UID: "4530a78c-b3ce-425c-bad3-c8821d4de544"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.299964 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "4530a78c-b3ce-425c-bad3-c8821d4de544" (UID: "4530a78c-b3ce-425c-bad3-c8821d4de544"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.300341 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-inventory" (OuterVolumeSpecName: "inventory") pod "4530a78c-b3ce-425c-bad3-c8821d4de544" (UID: "4530a78c-b3ce-425c-bad3-c8821d4de544"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.303617 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4530a78c-b3ce-425c-bad3-c8821d4de544" (UID: "4530a78c-b3ce-425c-bad3-c8821d4de544"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.348632 4932 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.348678 4932 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.348689 4932 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.348698 4932 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.348711 4932 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.348720 4932 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4530a78c-b3ce-425c-bad3-c8821d4de544-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.348730 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqwl8\" (UniqueName: \"kubernetes.io/projected/4530a78c-b3ce-425c-bad3-c8821d4de544-kube-api-access-hqwl8\") on node \"crc\" DevicePath \"\"" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.657252 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-6wtk6" event={"ID":"4530a78c-b3ce-425c-bad3-c8821d4de544","Type":"ContainerDied","Data":"a4ab40c8c34a40449625270e61f0e8bd0f05265c26a1e563db710c9187e54aac"} Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.657662 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4ab40c8c34a40449625270e61f0e8bd0f05265c26a1e563db710c9187e54aac" Nov 25 11:14:19 crc kubenswrapper[4932]: I1125 11:14:19.657740 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-6wtk6" Nov 25 11:14:23 crc kubenswrapper[4932]: I1125 11:14:23.606778 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:14:23 crc kubenswrapper[4932]: E1125 11:14:23.608011 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.501818 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-r8q9k"] Nov 25 11:14:24 crc kubenswrapper[4932]: E1125 11:14:24.503036 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4530a78c-b3ce-425c-bad3-c8821d4de544" containerName="telemetry-openstack-openstack-cell1" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.503056 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="4530a78c-b3ce-425c-bad3-c8821d4de544" containerName="telemetry-openstack-openstack-cell1" Nov 25 11:14:24 crc kubenswrapper[4932]: E1125 11:14:24.503131 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" containerName="installer" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.503141 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" containerName="installer" Nov 25 11:14:24 crc kubenswrapper[4932]: E1125 11:14:24.503167 4932 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.503175 4932 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.510847 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.510960 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="09b35ad9-d552-4b4c-a28c-837961e2f44a" containerName="installer" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.511009 4932 memory_manager.go:354] "RemoveStaleState removing state" podUID="4530a78c-b3ce-425c-bad3-c8821d4de544" containerName="telemetry-openstack-openstack-cell1" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.512337 4932 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.521299 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.521591 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.522007 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-sriov-agent-neutron-config" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.522640 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.524382 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-275n7" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.526210 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-r8q9k"] Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.678462 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdwzw\" (UniqueName: \"kubernetes.io/projected/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-kube-api-access-vdwzw\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.678537 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.678599 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.678628 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.678682 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.780252 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.780320 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.780394 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.780542 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdwzw\" (UniqueName: \"kubernetes.io/projected/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-kube-api-access-vdwzw\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.780610 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.789359 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.791213 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.791501 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.811378 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.836048 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdwzw\" (UniqueName: \"kubernetes.io/projected/c974ea7a-bc8c-405c-aad4-d30cf18dbff0-kube-api-access-vdwzw\") pod \"neutron-sriov-openstack-openstack-cell1-r8q9k\" (UID: \"c974ea7a-bc8c-405c-aad4-d30cf18dbff0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:24 crc kubenswrapper[4932]: I1125 11:14:24.866128 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" Nov 25 11:14:25 crc kubenswrapper[4932]: I1125 11:14:25.522669 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-r8q9k"] Nov 25 11:14:25 crc kubenswrapper[4932]: I1125 11:14:25.730370 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" event={"ID":"c974ea7a-bc8c-405c-aad4-d30cf18dbff0","Type":"ContainerStarted","Data":"0f6534b7e3cfe48554fccdb453cfde434444ee8f09cfe7cfa8bde2343aea47b8"} Nov 25 11:14:26 crc kubenswrapper[4932]: I1125 11:14:26.744318 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" event={"ID":"c974ea7a-bc8c-405c-aad4-d30cf18dbff0","Type":"ContainerStarted","Data":"70caba835c9c9afdbbe4a38814b5f2b0aa4bd2aa1b425f2928080f084535ac58"} Nov 25 11:14:26 crc kubenswrapper[4932]: I1125 11:14:26.795006 4932 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-sriov-openstack-openstack-cell1-r8q9k" podStartSLOduration=2.401506587 podStartE2EDuration="2.794982209s" podCreationTimestamp="2025-11-25 11:14:24 +0000 UTC" firstStartedPulling="2025-11-25 11:14:25.532774542 +0000 UTC m=+8725.658804105" lastFinishedPulling="2025-11-25 11:14:25.926250164 +0000 UTC m=+8726.052279727" observedRunningTime="2025-11-25 11:14:26.761492966 +0000 UTC m=+8726.887522529" watchObservedRunningTime="2025-11-25 11:14:26.794982209 +0000 UTC m=+8726.921011772" Nov 25 11:14:34 crc kubenswrapper[4932]: I1125 11:14:34.606659 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:14:34 crc kubenswrapper[4932]: E1125 11:14:34.607420 4932 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-plbqh_openshift-machine-config-operator(fc52f208-3635-4b33-a1f2-720bcff56064)\"" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" Nov 25 11:14:46 crc kubenswrapper[4932]: I1125 11:14:46.611244 4932 scope.go:117] "RemoveContainer" containerID="eaa0c5d3aeae5c459b5ded069d39f8f1484773becfd636633c955fbb1619f20f" Nov 25 11:14:47 crc kubenswrapper[4932]: I1125 11:14:47.023943 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" event={"ID":"fc52f208-3635-4b33-a1f2-720bcff56064","Type":"ContainerStarted","Data":"5f932a53a40fa5340595562a172134613fd084a69392becfa42d6124a60fd1a4"} Nov 25 
11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.174436 4932 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b"] Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.177306 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.179489 4932 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.180578 4932 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.189376 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b"] Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.350210 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8wsw\" (UniqueName: \"kubernetes.io/projected/2f73e550-39b8-4815-8a79-d77856170790-kube-api-access-p8wsw\") pod \"collect-profiles-29401155-98r5b\" (UID: \"2f73e550-39b8-4815-8a79-d77856170790\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.350522 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f73e550-39b8-4815-8a79-d77856170790-secret-volume\") pod \"collect-profiles-29401155-98r5b\" (UID: \"2f73e550-39b8-4815-8a79-d77856170790\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.350559 4932 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f73e550-39b8-4815-8a79-d77856170790-config-volume\") pod \"collect-profiles-29401155-98r5b\" (UID: \"2f73e550-39b8-4815-8a79-d77856170790\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.452567 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f73e550-39b8-4815-8a79-d77856170790-secret-volume\") pod \"collect-profiles-29401155-98r5b\" (UID: \"2f73e550-39b8-4815-8a79-d77856170790\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.452632 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f73e550-39b8-4815-8a79-d77856170790-config-volume\") pod \"collect-profiles-29401155-98r5b\" (UID: \"2f73e550-39b8-4815-8a79-d77856170790\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.452673 4932 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8wsw\" (UniqueName: \"kubernetes.io/projected/2f73e550-39b8-4815-8a79-d77856170790-kube-api-access-p8wsw\") pod \"collect-profiles-29401155-98r5b\" (UID: \"2f73e550-39b8-4815-8a79-d77856170790\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.453741 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f73e550-39b8-4815-8a79-d77856170790-config-volume\") pod \"collect-profiles-29401155-98r5b\" (UID: \"2f73e550-39b8-4815-8a79-d77856170790\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.460643 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f73e550-39b8-4815-8a79-d77856170790-secret-volume\") pod \"collect-profiles-29401155-98r5b\" (UID: \"2f73e550-39b8-4815-8a79-d77856170790\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.471555 4932 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8wsw\" (UniqueName: \"kubernetes.io/projected/2f73e550-39b8-4815-8a79-d77856170790-kube-api-access-p8wsw\") pod \"collect-profiles-29401155-98r5b\" (UID: \"2f73e550-39b8-4815-8a79-d77856170790\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:00 crc kubenswrapper[4932]: I1125 11:15:00.502271 4932 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:01 crc kubenswrapper[4932]: I1125 11:15:01.243165 4932 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b"] Nov 25 11:15:02 crc kubenswrapper[4932]: I1125 11:15:02.173417 4932 generic.go:334] "Generic (PLEG): container finished" podID="2f73e550-39b8-4815-8a79-d77856170790" containerID="a4c239fe112ef4ae58477022583afce8beeedb4012da29e5e9748eaa48838b63" exitCode=0 Nov 25 11:15:02 crc kubenswrapper[4932]: I1125 11:15:02.173458 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" event={"ID":"2f73e550-39b8-4815-8a79-d77856170790","Type":"ContainerDied","Data":"a4c239fe112ef4ae58477022583afce8beeedb4012da29e5e9748eaa48838b63"} Nov 25 11:15:02 crc kubenswrapper[4932]: I1125 11:15:02.173712 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" event={"ID":"2f73e550-39b8-4815-8a79-d77856170790","Type":"ContainerStarted","Data":"182bb9f8442eea490011715974fcde9a684703e4fe4db7f2333dbd56682ff1ea"} Nov 25 11:15:03 crc kubenswrapper[4932]: I1125 11:15:03.710229 4932 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:03 crc kubenswrapper[4932]: I1125 11:15:03.740526 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f73e550-39b8-4815-8a79-d77856170790-secret-volume\") pod \"2f73e550-39b8-4815-8a79-d77856170790\" (UID: \"2f73e550-39b8-4815-8a79-d77856170790\") " Nov 25 11:15:03 crc kubenswrapper[4932]: I1125 11:15:03.740590 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f73e550-39b8-4815-8a79-d77856170790-config-volume\") pod \"2f73e550-39b8-4815-8a79-d77856170790\" (UID: \"2f73e550-39b8-4815-8a79-d77856170790\") " Nov 25 11:15:03 crc kubenswrapper[4932]: I1125 11:15:03.740902 4932 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8wsw\" (UniqueName: \"kubernetes.io/projected/2f73e550-39b8-4815-8a79-d77856170790-kube-api-access-p8wsw\") pod \"2f73e550-39b8-4815-8a79-d77856170790\" (UID: \"2f73e550-39b8-4815-8a79-d77856170790\") " Nov 25 11:15:03 crc kubenswrapper[4932]: I1125 11:15:03.757820 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f73e550-39b8-4815-8a79-d77856170790-config-volume" (OuterVolumeSpecName: "config-volume") pod "2f73e550-39b8-4815-8a79-d77856170790" (UID: "2f73e550-39b8-4815-8a79-d77856170790"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 11:15:03 crc kubenswrapper[4932]: I1125 11:15:03.761621 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f73e550-39b8-4815-8a79-d77856170790-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2f73e550-39b8-4815-8a79-d77856170790" (UID: "2f73e550-39b8-4815-8a79-d77856170790"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 11:15:03 crc kubenswrapper[4932]: I1125 11:15:03.781444 4932 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f73e550-39b8-4815-8a79-d77856170790-kube-api-access-p8wsw" (OuterVolumeSpecName: "kube-api-access-p8wsw") pod "2f73e550-39b8-4815-8a79-d77856170790" (UID: "2f73e550-39b8-4815-8a79-d77856170790"). InnerVolumeSpecName "kube-api-access-p8wsw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 11:15:03 crc kubenswrapper[4932]: I1125 11:15:03.845236 4932 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8wsw\" (UniqueName: \"kubernetes.io/projected/2f73e550-39b8-4815-8a79-d77856170790-kube-api-access-p8wsw\") on node \"crc\" DevicePath \"\"" Nov 25 11:15:03 crc kubenswrapper[4932]: I1125 11:15:03.845760 4932 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2f73e550-39b8-4815-8a79-d77856170790-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 11:15:03 crc kubenswrapper[4932]: I1125 11:15:03.845777 4932 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2f73e550-39b8-4815-8a79-d77856170790-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 11:15:04 crc kubenswrapper[4932]: I1125 11:15:04.198848 4932 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" event={"ID":"2f73e550-39b8-4815-8a79-d77856170790","Type":"ContainerDied","Data":"182bb9f8442eea490011715974fcde9a684703e4fe4db7f2333dbd56682ff1ea"} Nov 25 11:15:04 crc kubenswrapper[4932]: I1125 11:15:04.198899 4932 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="182bb9f8442eea490011715974fcde9a684703e4fe4db7f2333dbd56682ff1ea" Nov 25 11:15:04 crc kubenswrapper[4932]: I1125 11:15:04.198915 4932 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401155-98r5b" Nov 25 11:15:04 crc kubenswrapper[4932]: I1125 11:15:04.807789 4932 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"] Nov 25 11:15:04 crc kubenswrapper[4932]: I1125 11:15:04.819106 4932 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401110-tlvmt"] Nov 25 11:15:06 crc kubenswrapper[4932]: I1125 11:15:06.620539 4932 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d" path="/var/lib/kubelet/pods/13e9c5b3-f6b9-40f1-8ea3-8dc040e6960d/volumes" Nov 25 11:15:19 crc kubenswrapper[4932]: I1125 11:15:19.591880 4932 scope.go:117] "RemoveContainer" containerID="6e20d6f41bf1de05055ee907ed3a799d0946b326a575dd568e170debd53ea853" Nov 25 11:17:07 crc kubenswrapper[4932]: I1125 11:17:07.181598 4932 patch_prober.go:28] interesting pod/machine-config-daemon-plbqh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 11:17:07 crc kubenswrapper[4932]: I1125 11:17:07.182067 4932 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-plbqh" podUID="fc52f208-3635-4b33-a1f2-720bcff56064" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111310106024432 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111310107017350 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015111266206016505 